diff --git a/.changelog/config.toml b/.changelog/config.toml deleted file mode 100644 index 598fd03941..0000000000 --- a/.changelog/config.toml +++ /dev/null @@ -1,79 +0,0 @@ -# The GitHub URL for your project. -# -# This is mainly necessary if you need to automatically generate changelog -# entries directly from the CLI. Right now we only support GitHub, but if -# anyone wants GitLab support please let us know and we'll try implement it -# too. -project_url = "https://github.com/informalsystems/ibc-rs" - -# The file to use as a Handlebars template for changes added directly through -# the CLI. -# -# Assumes that relative paths are relative to the `.changelog` folder. If this -# file does not exist, a default template will be used. -change_template = "change-template.md" - -# The number of characters at which to wrap entries automatically added from -# the CLI. -wrap = 80 - -# The heading right at the beginning of the changelog. -heading = "# CHANGELOG" - -# What style of bullet to use for the instances where unclog has to generate -# bullets for you. Can be "-" or "*". -bullet_style = "-" - -# The message to output when your changelog has no entries yet. -empty_msg = "Nothing to see here! Add some entries to get started." - -# The name of the file (relative to the `.changelog` directory) to use as an -# epilogue for your changelog (will be appended as-is to the end of your -# generated changelog). -epilogue_filename = "epilogue.md" - - -# Settings relating to unreleased changelog entries. -[unreleased] - -# The name of the folder containing unreleased entries, relative to the -# `.changelog` folder. -folder = "unreleased" - -# The heading to use for the unreleased entries section. -heading = "## Unreleased" - - -# Settings relating to sets (groups) of changes in the changelog. For example, -# the "BREAKING CHANGES" section would be considered a change set. -[change_sets] - -# The filename containing a summary of the intended changes. 
Relative to the -# change set folder (e.g. `.changelog/unreleased/breaking-changes/summary.md`). -summary_filename = "summary.md" - -# The extension of files in a change set. -entry_ext = "md" - - -# Settings related to components/sub-modules. Only relevant if you make use of -# components/sub-modules. -[components] - -# The title to use for the section of entries not relating to a specific -# component. -general_entries_title = "General" - -# The number of spaces to inject before each component-related entry. -entry_indent = 2 - - # The components themselves. Each component has a name (used when rendered - # to Markdown) and a path relative to the project folder (i.e. relative to - # the parent of the `.changelog` folder). - [components.all] - ibc = { name = "IBC Modules", path = "modules" } - ibc-proto = { name = "IBC Proto", path = "proto" } - ibc-relayer = { name = "Relayer Library", path = "relayer" } - ibc-relayer-cli = { name = "Relayer CLI", path = "relayer-cli" } - ibc-integration-test = { name = "Integration Test Framework", path = "tools/test-framework" } - guide = { name = "Guide", path = "guide" } diff --git a/.changelog/epilogue.md b/.changelog/epilogue.md deleted file mode 100644 index 9c812992ba..0000000000 --- a/.changelog/epilogue.md +++ /dev/null @@ -1,1081 +0,0 @@ -## v0.6.1 -*July 22nd, 2021* - -This minor release mainly improves the reliability of the relayer -by ensuring that pending packets are cleared on start, -and that Hermes can recover from the WebSocket subscriptions -being closed under its feet by Tendermint. - -Upgrading from version `0.6.0` to `0.6.1` requires no explicit steps. - -> **WARNING:** Due to a regression ([#1229]), the `upgrade client`, -> `tx raw upgrade-clients`, and `tx raw upgrade-chain` commands have -> been temporarily disabled in this version. -> These commands will be re-enabled in the next version. 
- -### FEATURES - -- [ibc] - - Enable `pub` access to verification methods of ICS 03 & 04 ([#1198]) - - Add `ics26_routing::handler::decode` function ([#1194]) - - Add a pseudo root to `MockConsensusState` ([#1215]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Add CLI git hash ([#1094]) - - Fix unwraps in `packet query` CLIs ([#1114]) - -### BUG FIXES - -- [ibc] - - Fix stack overflow in `MockHeader` implementation ([#1192]) - - Align `as_str` and `from_str` behavior in `ClientType` ([#1192]) - -- [ibc-relayer] - - Ensure pending packets are cleared on start ([#1200]) - - Recover from missed RPC events after WebSocket subscription is closed by Tendermint ([#1196]) - - -[#1094]: https://github.com/informalsystems/ibc-rs/issues/1094 -[#1114]: https://github.com/informalsystems/ibc-rs/issues/1114 -[#1192]: https://github.com/informalsystems/ibc-rs/issues/1192 -[#1194]: https://github.com/informalsystems/ibc-rs/issues/1194 -[#1196]: https://github.com/informalsystems/ibc-rs/issues/1196 -[#1198]: https://github.com/informalsystems/ibc-rs/issues/1198 -[#1200]: https://github.com/informalsystems/ibc-rs/issues/1200 -[#1215]: https://github.com/informalsystems/ibc-rs/issues/1215 -[#1229]: https://github.com/informalsystems/ibc-rs/issues/1229 - - -## v0.6.0 -*July 12th, 2021* - - -Many thanks to Fraccaroli Gianmarco (@Fraccaman) for helping us improve the -reliability of Hermes ([#697]). - -This release includes two major features to Hermes: (1) support for reloading -the chains from the configuration file at runtime, and (2) a filtering mechanism -to restrict Hermes activity based on predefined parameters (e.g., packet relaying -on certain ports and channels exclusively, and ignoring activity for clients -that have non-standard trust threshold). - -In addition to these two, we have also added a health checkup mechanism, plus new -`config validate` and `query channel ends` CLIs. 
- -### Upgrading from 0.5.0 to 0.6.0 - -When upgrading from Hermes v0.5.0 to v0.6.0, the most important -point to watch out for is the configuration file. -The Hermes config.toml configuration file has went through a few revisions, -with the changes described below. - -#### Added inline documentation for all options. - -Please have a look around the [config.toml](https://github.com/informalsystems/ibc-rs/blob/v0.6.0/config.toml) directly. - -#### Added a packet filtering mechanism based on channel/port identifiers - -This feature will restrict the channels on which Hermes relays packets. -There are two new options in the configuration file: - -1. A global `filter` parameter to enable or disable filtering globally. -2. A per-chain `.filters` option that expects a `policy` (either `allow` or - `deny`) plus a list of channel and - port identifiers. If policy is `allow`, then packet relaying will be restricted to this - list for the corresponding chain. If the policy is `deny`, then any packets - from this list will be ignored. - -#### Added filtering based on client state - -The global `filter` option additionally enables filtering of all activities -based on client state trust threshold. If enabled, Hermes will ignore all -activity for clients that have a trust threshold different than `1/3`. - -#### Added a packet clearing configuration option - -This will enable the parametrization of the frequency -at which Hermes will clear pending packets. This is a global option, called -`clear_packets_interval`, which applies to all chains in the configuration. - - -The full list of changes is described below. 
- -### FEATURES - -- [ibc-relayer] - - The chains configuration can be reloaded by sending the Hermes process a `SIGHUP` signal ([#1117]) - - Added support for filtering based on client state trust threshold ([#1165]) - -- [ibc-relayer-cli] - - Added `config validate` CLI to Hermes ([#600]) - - Added filtering capability to deny or allow for specific channels ([#1140], [#1141], [#69]) - - Added basic channel filter ([#1140]) - - Added `query channel ends` CLI command ([#1062]) - - Added a health checkup mechanism for Hermes ([#697, #1057]) - -### IMPROVEMENTS - -- Update to `tendermint-rs` v0.20.0 ([#1125]) -- Add inline documentation to config.toml ([#1127]) - -- [ibc-relayer] - - Hermes will now clear pending packets at a configurable interval ([#1124]) - -### BUG FIXES - -- [ibc-relayer] - - Fix for schedule refreshing bug ([#1143]) - - -[#69]: https://github.com/informalsystems/ibc-rs/issues/69 -[#600]: https://github.com/informalsystems/ibc-rs/issues/600 -[#697]: https://github.com/informalsystems/ibc-rs/issues/697 -[#1062]: https://github.com/informalsystems/ibc-rs/issues/1062 -[#1117]: https://github.com/informalsystems/ibc-rs/issues/1117 -[#1057]: https://github.com/informalsystems/ibc-rs/issues/1057 -[#1125]: https://github.com/informalsystems/ibc-rs/issues/1125 -[#1124]: https://github.com/informalsystems/ibc-rs/issues/1124 -[#1127]: https://github.com/informalsystems/ibc-rs/issues/1127 -[#1140]: https://github.com/informalsystems/ibc-rs/issues/1140 -[#1141]: https://github.com/informalsystems/ibc-rs/issues/1141 -[#1143]: https://github.com/informalsystems/ibc-rs/issues/1143 -[#1165]: https://github.com/informalsystems/ibc-rs/issues/1165 - - -## v0.5.0 -*June 22nd, 2021* - -This release brings a few features, and several improvements and bug fixes to the Hermes -relayer, notably the capability for Hermes to complete IBC connection handshakes when -it detects that one has been initialized, as well as the ability to detect chain -impersonation attacks and 
to dynamically estimate the gas needed to submit -a transaction. - -Moreover, the overall reliability and availability of the relayer has also been improved -substantially by switching over to `tx_broadcast_sync` for submitting transactions. - -### FEATURES - -- [ibc-relayer-cli] - - Add `--hd-path` option to `keys restore` and `keys add` commands to specify - derivation path when importing keys ([#1049]) - -- [ibc-relayer] - - Event-based handshake completion for IBC connections ([#821]) - - Enable TLS support for gRPC client ([#877]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Minor log output improvements: color enabled, reduced redundant information ([#1100]) - -- [ibc-relayer] - - Update the on-chain IBC client with supporting headers when light client verification - performs bisection when verifying a header for a client update or a misbehaviour detection ([#673]) - - Add mitigation for chain impersonation attacks ([#1038]) - - Determine gas fee dynamically per transaction ([#930]) - - Submit transactions with `broadcast_tx_sync` and keep track of account sequences ([#986]) - -### BUG FIXES - -- [gaiad-manager] - - Removed the testnet command as not all networks support it ([#1050]) - - Update for compatibility with Hermes's new `--hd-path` option - -- [ibc-relayer] - - Fix bug where channels were left partially open after `channel create` ([#1064]) - - Prevent account sequence mismatch errors in many cases ([#919], [#978]) - - Prevent timeouts when submitting transactins ([#977]) - -### BREAKING CHANGES - -- [ibc-relayer-cli] - - Removed `--coin-type` option from `keys restore` command. 
Use `--hd-path` instead ([#1049]) - -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#821]: https://github.com/informalsystems/ibc-rs/issues/821 -[#877]: https://github.com/informalsystems/ibc-rs/issues/877 -[#919]: https://github.com/informalsystems/ibc-rs/issues/919 -[#930]: https://github.com/informalsystems/ibc-rs/issues/930 -[#977]: https://github.com/informalsystems/ibc-rs/issues/977 -[#978]: https://github.com/informalsystems/ibc-rs/issues/978 -[#986]: https://github.com/informalsystems/ibc-rs/issues/986 -[#1038]: https://github.com/informalsystems/ibc-rs/issues/1038 -[#1049]: https://github.com/informalsystems/ibc-rs/issues/1049 -[#1050]: https://github.com/informalsystems/ibc-rs/issues/1050 -[#1064]: https://github.com/informalsystems/ibc-rs/issues/1064 -[#1100]: https://github.com/informalsystems/ibc-rs/issues/1100 - -## v0.4.0 -*June 3rd, 2021* - -- This release of Hermes features an internal [telemetry service][telemetry] - which can export metrics about the relayer to Prometheus. -- A new [relaying strategy][strategy] is now available, which enables Hermes to - complete channel handshakes in an event-based fashion. -- Hermes now checks if another relayer may have already processed a packet event, - and will not attempt to process it itself, which improves performance. -- The startup time of the relayer has been substantially improved. -- The `start-multi` command has been promoted to `start`, which means - that the worker-based relayer is not experimental anymore. -- A regression where Hermes would not recover after a node went down and up again was fixed. - -[telemetry]: https://hermes.informal.systems/telemetry.html -[strategy]: http://hermes.informal.systems/config.html?highlight=strategy#global - -> Special thanks to Colin Axnér (@colin-axner) and Jongwhan Lee (@leejw51crypto) -> for raising multiple issues that helped us improve the reliability of Hermes. 
- -### FEATURES - -- [ibc-relayer] - - Add telemetry and Prometheus endpoint ([#868], [#1032]) - - Add support for event based channel relaying ([#822]) - - Graceful handling of packet events in the presence of multiple relayers ([#983]) - -### IMPROVEMENTS - -- [ibc] - - Started `unwrap` cleanup ([#871]) - -- [ibc-relayer-cli] - - Include chain-id in `query clients` command, and sort output by client counter ([#992]) - - Improve config loading message ([#996]) - - Improve Hermes worker spawn time for `start` command ([#998]) - - Better Hermes help message when command is unrecognized ([#1003]) - -### BUG FIXES - -- [ibc-relayer] - - Fix client worker initialization error ([#972]) - - Fix `hermes start` panic when all chains are unreachable ([#972]) - - Ensure expired or frozen client worker logs message and terminates ([#1022]) - - Fix regression where Hermes would not recover after a node went down and up again ([#1026]) - -- [gaiad-manager] - - Import hermes keys properly even if wallet HD derivation path is set ([#975]) - - Apply default values to missing configuration parameters ([#993]) - - `gm hermes config` now creates hermes 0.4.0 compatible configuration ([#1039]) - -### BREAKING CHANGES - -- [ibc-relayer-cli] - - Promote `start-multi` command to `start` ([#911]) - -[#822]: https://github.com/informalsystems/ibc-rs/issues/822 -[#868]: https://github.com/informalsystems/ibc-rs/issues/868 -[#871]: https://github.com/informalsystems/ibc-rs/issues/871 -[#911]: https://github.com/informalsystems/ibc-rs/issues/911 -[#972]: https://github.com/informalsystems/ibc-rs/issues/972 -[#975]: https://github.com/informalsystems/ibc-rs/issues/975 -[#983]: https://github.com/informalsystems/ibc-rs/issues/983 -[#992]: https://github.com/informalsystems/ibc-rs/issues/992 -[#996]: https://github.com/informalsystems/ibc-rs/issues/996 -[#993]: https://github.com/informalsystems/ibc-rs/issues/993 -[#998]: https://github.com/informalsystems/ibc-rs/issues/998 -[#1003]: 
https://github.com/informalsystems/ibc-rs/issues/1003 -[#1022]: https://github.com/informalsystems/ibc-rs/issues/1022 -[#1026]: https://github.com/informalsystems/ibc-rs/issues/1026 -[#1032]: https://github.com/informalsystems/ibc-rs/issues/1032 -[gaiad-manager]: https://github.com/informalsystems/ibc-rs/blob/master/scripts/gm/README.md -[#1039]: https://github.com/informalsystems/ibc-rs/issues/1039 - -## v0.3.2 -*May 21st, 2021* - -This is minor release which brings substantial performance improvements -to the relayer (relaying 1000 packets now takes 2-5min instead of 1h+), -better UX for the `ft-transfer` command, and automatic deployment of -Docker images to Docker Hub. - -### FEATURES - -- [ibc-relayer-cli] - - Add a `--key` option to the tx raw ft-transfer command to override the account used for sending messages ([#963]) - -- [ibc-relayer] - - Add support for multiple keys to the keyring ([#963]) - -- [release] - - Released the official [Hermes image][hermes-docker] on Docker Hub ([#894]) - - Automatically deploy Docker Hub image during release ([#967]) - -### IMPROVEMENTS - -- [ibc-relayer] - - Batch together all events from all transactions included in a block ([#957]) - -### BUG FIXES - -- [ibc-relayer-cli] - - Prevent sending `ft-transfer` MsgTransfer on a non-Open channel ([#960]) - -### BREAKING CHANGES - -> Nothing - -[#868]: https://github.com/informalsystems/ibc-rs/issues/868 -[#894]: https://github.com/informalsystems/ibc-rs/pull/894 -[#957]: https://github.com/informalsystems/ibc-rs/issues/957 -[#960]: https://github.com/informalsystems/ibc-rs/issues/960 -[#963]: https://github.com/informalsystems/ibc-rs/issues/963 -[#967]: https://github.com/informalsystems/ibc-rs/issues/967 - -[hermes-docker]: https://hub.docker.com/r/informalsystems/hermes - -## v0.3.1 -*May 14h, 2021* - -This release improves the UX of a couple commands, fixes a bug related -to delay periods, and adds support for packet timeouts based on timestamps, -as well as support 
Protobuf-encoded keys. - -### FEATURES - -- [scripts] - - Created the Gaiad Manager `gm` CLI tool for managing gaiad instances on the local machine ([#902]) - -- [ibc-relayer] - - Add support for packet timeout based on timeout timestamp ([#937]) - - Added support for Protobuf-based Keyring ([#925]) - -### IMPROVEMENTS - -- [ibc-relayer-cli] - - Improve UX when querying non-existing connections and channels ([#875], [#920]) - - More details in error messages to increase debuggability ([#921], [#934]) - - Disallow creating a client with same source and destination chains ([#932]) - - Make packet worker more resilient to nodes being unreachable for a short amount of time ([#943]) - -### BUG FIXES - -- [ibc] - - Process raw `delay_period` field as nanoseconds instead of seconds. ([#927]) - -### BREAKING CHANGES - -> Nothing - - -[#875]: https://github.com/informalsystems/ibc-rs/issues/875 -[#920]: https://github.com/informalsystems/ibc-rs/issues/920 -[#902]: https://github.com/informalsystems/ibc-rs/issues/902 -[#921]: https://github.com/informalsystems/ibc-rs/issues/921 -[#925]: https://github.com/informalsystems/ibc-rs/issues/925 -[#927]: https://github.com/informalsystems/ibc-rs/issues/927 -[#932]: https://github.com/informalsystems/ibc-rs/issues/932 -[#934]: https://github.com/informalsystems/ibc-rs/issues/934 -[#937]: https://github.com/informalsystems/ibc-rs/issues/937 -[#943]: https://github.com/informalsystems/ibc-rs/issues/943 - - -## v0.3.0 -*May 7h, 2021* - -Special thanks to Jongwhan Lee (@leejw51crypto) for his contributions ([#878]). - -This release mostly focuses on improving the UX and the experimental multi-paths relayer (`start-multi` command), -which has been made more resilient against nodes going down, and is now able to clear pending packets -and periodically refresh IBC clients. The relayer now also supports [ICS 027 (Interchain Accounts)][ics27]. 
- -[ics27]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md - -### FEATURES - -- [ibc-relayer] - - Support for ICS27 ([#794]) - -- [ibc-relayer-cli] - - Added packet clearing and client refresh capabilities for the `start-multi` command ([#784], [#786]) - -### IMPROVEMENTS - -- [ibc] - - Reinstated `ics23` dependency ([#854]) - - Use proper Timestamp type to track time ([#758]) - -- [ibc-relayer] - - Change the default for client creation to allow governance recovery in case of expiration or misbehaviour ([#785]) - - Use a single supervisor in `start-multi` to subscribe to all configured chains ([#862]) - - The `start-multi` command is now more resilient to a node not being up or going down, and will attempt to reconnect ([#871]) - -### BUG FIXES - -- [ibc] - - Fix parsing in `chain_version` when chain identifier has multiple dashes ([#878]) - -- [ibc-relayer] - - Fix pagination in gRPC query for clients ([#811]) - - Fix relayer crash when hermes starts in the same time as packets are being sent ([#851]) - - Fix missing port information in `hermes query channels` ([#840]) - - Fix crash during initialization of event monitor when node is down ([#863]) - - Spawn a single Tokio runtime for the whole supervisor instead of one per chain ([#909]) - -- [ibc-relayer-cli] - - Fix for `ft-transfer` mismatching arguments ([#869]) - - Fix channel destination chain mismatch on unreceived-packets or unreceived-acks ([#873]) - -### BREAKING CHANGES - -- [ibc-relayer] - - `hermes -j query channels` command now returns `result` array with the format - `[{"channel_id":"channel-0","port_id":"transfer"}, ...]` instead of `["channel-0", ...]` ([#840]) - - -[#758]: https://github.com/informalsystems/ibc-rs/issues/758 -[#784]: https://github.com/informalsystems/ibc-rs/issues/784 -[#785]: https://github.com/informalsystems/ibc-rs/issues/785 -[#786]: https://github.com/informalsystems/ibc-rs/issues/786 -[#794]: 
https://github.com/informalsystems/ibc-rs/issues/794 -[#811]: https://github.com/informalsystems/ibc-rs/issues/811 -[#840]: https://github.com/informalsystems/ibc-rs/issues/840 -[#851]: https://github.com/informalsystems/ibc-rs/issues/851 -[#854]: https://github.com/informalsystems/ibc-rs/issues/854 -[#862]: https://github.com/informalsystems/ibc-rs/issues/862 -[#863]: https://github.com/informalsystems/ibc-rs/issues/863 -[#869]: https://github.com/informalsystems/ibc-rs/issues/869 -[#871]: https://github.com/informalsystems/ibc-rs/issues/871 -[#873]: https://github.com/informalsystems/ibc-rs/issues/873 -[#878]: https://github.com/informalsystems/ibc-rs/issues/878 -[#909]: https://github.com/informalsystems/ibc-rs/issues/909 - -## v0.2.0 -*April 14th, 2021* - -This release includes initial support for relaying over multiple paths from a single `hermes` instance. -Adds support for relayer restart, where pending packets are cleared. -Includes support for ordered channels, packet delay, misbehaviour detection and evidence submission, client upgrade after counterparty chain upgrades. - -This release brings improvements to the relayer UX by providing new and updated commands for keys, client, connection and channel management. -In addition, it simplifies the configuration of and integration with the light client. - -This release also finalizes the initial implementation of all the ICS 004 handlers. - -### FEATURES - -- Update to `tendermint-rs` v0.19.0 ([#798]) - -- [ibc] - - Added handler(s) for sending packets ([#695]), recv. and ack. 
packets ([#736]), and timeouts ([#362]) - -- [ibc-relayer] - - Support for relayer restart ([#561]) - - Add support for ordered channels ([#599]) - - Misbehaviour detection and evidence submission ([#632]) - - Use a stateless light client without a runtime ([#673]) - -- [ibc-relayer-cli] - - Added `create connection` and `create channel` CLIs ([#630], [#715]) - - Proposed ADR 006 to describe Hermes v0.2.0 use-cases ([#637]) - - Added `client-upgrade` CLI ([#357]) - - Added delay feature for packet relaying ([#640]) - - Update gaia to version 4.2.0 for e2e tests on CI ([#809]) - - Add `start-multi` command to relay on all paths defined in the configuration ([#748]) - - Add option to specify which events to listen for in `listen` command ([#550]) - - Add option to customise receiver address for `ft-transfer` command ([#806]) - - Add `keys restore` command to import a signing key from its mnemonic ([#813]) - -### IMPROVEMENTS - -- [ibc] - - Follow Rust guidelines naming conventions ([#689]) - - Per client structure modules ([#740]) - - MBT: use modelator crate ([#761]) - -- [ibc-relayer] - - Consistent identifier handling across ICS 02, 03 and 04 ([#622]) - -- [ibc-relayer-cli] - - Clarified success path for updating a client that is already up-to-date ([#734]) - - Added `create` and `update` wrappers for client raw commands ([#772]) - - Output by default is human-readable, and JSON is optional ([#805]) - -### BUG FIXES - -- [ibc] - - Fix overflow bug in ICS03 client consensus height verification method ([#685]) - - Allow a conn open ack to succeed in the happy case ([#699]) - -- [ibc-relayer] - - Replaced `rust-crypto` & `bitcoin-wallet` deprecated dependencies ([#352]) - - Fix for hard-coded account number ([#752]) - - Fix for chains that don't have `cosmos` account prefix ([#416]) - - Fix for building the `trusted_validator_set` for the header used in client updates ([#770]) - - Don't send `MsgAcknowledgment` if channel is closed ([#675]) - - Fix a bug where the 
keys addresses had their account prefix overriden by the prefix in the configuration ([#751]) - -- [ibc-relayer-cli] - - Hermes guide: improved installation guideline ([#672]) - - Make fee denom and amount configurable ([#754]) - -- [ibc-proto] - - Fix for proto files re-compilation bug ([#801]) - -### BREAKING CHANGES - -- [ibc] - - `MsgConnectionOpenAck.counterparty_connection_id` is now a `ConnectionId` instead of an `Option`([#700]) - -- [ibc-relayer] - - Remove the light client configuration from the global configuration ([#793]) - -- [ibc-relayer-cli] - - Remove the light add and light rm commands ([#793]) - - -[#352]: https://github.com/informalsystems/ibc-rs/issues/352 -[#362]: https://github.com/informalsystems/ibc-rs/issues/362 -[#357]: https://github.com/informalsystems/ibc-rs/issues/357 -[#416]: https://github.com/informalsystems/ibc-rs/issues/416 -[#561]: https://github.com/informalsystems/ibc-rs/issues/561 -[#550]: https://github.com/informalsystems/ibc-rs/issues/550 -[#599]: https://github.com/informalsystems/ibc-rs/issues/599 -[#630]: https://github.com/informalsystems/ibc-rs/issues/630 -[#632]: https://github.com/informalsystems/ibc-rs/issues/632 -[#640]: https://github.com/informalsystems/ibc-rs/issues/640 -[#672]: https://github.com/informalsystems/ibc-rs/issues/672 -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#675]: https://github.com/informalsystems/ibc-rs/issues/675 -[#685]: https://github.com/informalsystems/ibc-rs/issues/685 -[#689]: https://github.com/informalsystems/ibc-rs/issues/689 -[#695]: https://github.com/informalsystems/ibc-rs/issues/695 -[#699]: https://github.com/informalsystems/ibc-rs/issues/699 -[#700]: https://github.com/informalsystems/ibc-rs/pull/700 -[#715]: https://github.com/informalsystems/ibc-rs/issues/715 -[#734]: https://github.com/informalsystems/ibc-rs/issues/734 -[#736]: https://github.com/informalsystems/ibc-rs/issues/736 -[#740]: https://github.com/informalsystems/ibc-rs/issues/740 -[#748]: 
https://github.com/informalsystems/ibc-rs/issues/748 -[#751]: https://github.com/informalsystems/ibc-rs/issues/751 -[#752]: https://github.com/informalsystems/ibc-rs/issues/752 -[#754]: https://github.com/informalsystems/ibc-rs/issues/754 -[#761]: https://github.com/informalsystems/ibc-rs/issues/761 -[#772]: https://github.com/informalsystems/ibc-rs/issues/772 -[#770]: https://github.com/informalsystems/ibc-rs/issues/770 -[#793]: https://github.com/informalsystems/ibc-rs/pull/793 -[#798]: https://github.com/informalsystems/ibc-rs/issues/798 -[#801]: https://github.com/informalsystems/ibc-rs/issues/801 -[#805]: https://github.com/informalsystems/ibc-rs/issues/805 -[#806]: https://github.com/informalsystems/ibc-rs/issues/806 -[#809]: https://github.com/informalsystems/ibc-rs/issues/809 - - -## v0.1.1 -*February 17, 2021* - -This release brings a quick fix for a problem with a dependency of crate -`ibc-relayer`, which causes build & installation issues. Many thanks to -@Fraccaman for bringing this problem to our attention! ([#672]) - - -Additionally, this release also introduces initial implementation for most of -ICS 004 handlers, and several bug fixes and improvements, e.g., refactored -some CLI code, refactored the Height type in the IBC Events, and a bug fix -involving packet acks in a 3-chain setup. More details below. 
- -### FEATURES -- [ibc-relayer] - - Listen to channel close initialization event and perform the close handshake ([#560]) - - Updated to tendermint-rs `v0.18.1` ([#682], [#671]) - -### IMPROVEMENTS - -- [ibc] - - Change event height to ICS height ([#549]) - -- [ibc-relayer-cli] - - Cleanup CLI code ([#572]) - -### BUG FIXES - -- [ibc] - - Fix panic in conn open try when no connection id is provided ([#626]) - - Disable MBT tests if the "mocks" feature is not enabled ([#643]) - -- [ibc-relayer] - - Quick fix for `funty` breaking change bug ([#665]) - -- [ibc-relayer-cli] - - Fix wrong acks sent with `tx raw packet-ack` in a 3-chain setup ([#614]) - -### BREAKING CHANGES - -- [ibc] - - Implementation of the `ChanOpenAck`, `ChanOpenConfirm`, `ChanCloseInit`, and `ChanCloseConfirm` handlers ([#316]) - - Remove dependency on `tendermint-rpc` ([#624]) - -- [ibc-relayer-cli] - - Remove the `proof` option from CLI ([#572]) - -[#316]: https://github.com/informalsystems/ibc-rs/issues/316 -[#549]: https://github.com/informalsystems/ibc-rs/issues/549 -[#560]: https://github.com/informalsystems/ibc-rs/issues/560 -[#572]: https://github.com/informalsystems/ibc-rs/issues/572 -[#614]: https://github.com/informalsystems/ibc-rs/issues/614 -[#622]: https://github.com/informalsystems/ibc-rs/issues/622 -[#624]: https://github.com/informalsystems/ibc-rs/issues/624 -[#626]: https://github.com/informalsystems/ibc-rs/issues/626 -[#637]: https://github.com/informalsystems/ibc-rs/issues/637 -[#643]: https://github.com/informalsystems/ibc-rs/issues/643 -[#665]: https://github.com/informalsystems/ibc-rs/issues/665 -[#671]: https://github.com/informalsystems/ibc-rs/pull/671 -[#682]: https://github.com/informalsystems/ibc-rs/issues/682 - -[ibc]: https://github.com/informalsystems/ibc-rs/tree/master/modules -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli - -## v0.1.0 -*February 4, 2021* - -🎉 This release brings the first publication of `ibc-relayer` and 
-`ibc-relayer-cli` to [crates.io](https://crates.io). - -Noteworthy changes in this release include: - -- The binary in the `ibc-relayer-cli` crate was given the name Hermes. -- We published a comprehensive guide for Hermes at [hermes.informal.systems](https://hermes.informal.systems). -- Major improvements to user experience, in particular at CLI level: JSON output, - configurable log output level, dedicated channel handshake command, as well as - overall improvements to error display and output. - -### FEATURES - -- Continous Integration (CI) end-to-end (e2e) testing with gaia v4 ([#32], [#582], [#602]) -- Add support for streamlining releases ([#507]) - -- [ibc-relayer-cli] - - Implement command to query the channels associated with a connection ([#505]) - - JSON output for queries and txs ([#500]) - - Added 'required' annotation for CLIs queries & txs; better error display ([#555]) - - Implement commands for channel close init and confirm ([#538]) - - Implement command to perform the handshake for a new channel ([#557]) - - Query all clients command ([#552]) - - Query all connections command ([#553]) - - Query all channels command ([#568]) - - Added a relayer binary guide ([#542]) - - Split the dev-env script in `setup_chains` and `init_clients` ([#577]) - -- [ibc-relayer] - - Added retry mechanism, restructured relayer ([#519]) - - Relay `MsgTimeoutOnClose` if counterparty channel state is `State::Closed` - -- [ibc] - - Add `MsgTimeoutOnClose` message type ([#563]) - - Implement `MsgChannelOpenTry` message handler ([#543]) - -### IMPROVEMENTS - -- Update to `tendermint-rs` v0.18.0 ([#517], [#583]) -- Update to `tokio` 1.0, `prost` 0.7 and `tonic` 0.4 ([#527]) - -- [ibc-relayer-cli] - - Replace `ChannelConfig` in `Channel::new` ([#511]) - - Add `packet-send` CLI ([#470]) - - UX improvements for relayer txs ([#536], [#540], [#554]) - - Allow running standalone commands concurrently to the main relayer loop ([#501]) - - Remove the simd-based integration tests 
([#593]) - -- [ibc-relayer] - - Performance improvements ([#514], [#537]) - - Fix for mismatching `bitcoin` dep ([#525]) - -- [ibc] - - Clean the `validate_basic` method ([#94]) - - `MsgConnectionOpenAck` testing improvements ([#306]) - -### BUG FIXES: -- [ibc-relayer-cli] - - Help and usage commands show 'hermes' for executable name ([#590]) - -- [ibc] - - Fix for storing `ClientType` upon 'create-client' ([#513]) - -### BREAKING CHANGES: - -- [ibc] - - The `ibc::handler::Event` is removed and handlers now produce `ibc::events::IBCEvent`s ([#535]) - -[#32]: https://github.com/informalsystems/ibc-rs/issues/32 -[#94]: https://github.com/informalsystems/ibc-rs/issues/94 -[#306]: https://github.com/informalsystems/ibc-rs/issues/306 -[#470]: https://github.com/informalsystems/ibc-rs/issues/470 -[#500]: https://github.com/informalsystems/ibc-rs/issues/500 -[#501]: https://github.com/informalsystems/ibc-rs/issues/501 -[#505]: https://github.com/informalsystems/ibc-rs/issues/505 -[#507]: https://github.com/informalsystems/ibc-rs/issues/507 -[#511]: https://github.com/informalsystems/ibc-rs/pull/511 -[#513]: https://github.com/informalsystems/ibc-rs/issues/513 -[#514]: https://github.com/informalsystems/ibc-rs/issues/514 -[#517]: https://github.com/informalsystems/ibc-rs/issues/517 -[#519]: https://github.com/informalsystems/ibc-rs/issues/519 -[#525]: https://github.com/informalsystems/ibc-rs/issues/525 -[#527]: https://github.com/informalsystems/ibc-rs/issues/527 -[#535]: https://github.com/informalsystems/ibc-rs/issues/535 -[#536]: https://github.com/informalsystems/ibc-rs/issues/536 -[#537]: https://github.com/informalsystems/ibc-rs/issues/537 -[#538]: https://github.com/informalsystems/ibc-rs/issues/538 -[#540]: https://github.com/informalsystems/ibc-rs/issues/540 -[#542]: https://github.com/informalsystems/ibc-rs/issues/542 -[#543]: https://github.com/informalsystems/ibc-rs/issues/543 -[#552]: https://github.com/informalsystems/ibc-rs/issues/553 -[#553]: 
https://github.com/informalsystems/ibc-rs/issues/553 -[#554]: https://github.com/informalsystems/ibc-rs/issues/554 -[#555]: https://github.com/informalsystems/ibc-rs/issues/555 -[#557]: https://github.com/informalsystems/ibc-rs/issues/557 -[#563]: https://github.com/informalsystems/ibc-rs/issues/563 -[#568]: https://github.com/informalsystems/ibc-rs/issues/568 -[#577]: https://github.com/informalsystems/ibc-rs/issues/577 -[#582]: https://github.com/informalsystems/ibc-rs/issues/582 -[#583]: https://github.com/informalsystems/ibc-rs/issues/583 -[#590]: https://github.com/informalsystems/ibc-rs/issues/590 -[#593]: https://github.com/informalsystems/ibc-rs/issues/593 -[#602]: https://github.com/informalsystems/ibc-rs/issues/602 - -## v0.0.6 -*December 23, 2020* - -This release focuses on upgrading the relayer and ibc modules to the latest interfaces from the ecosystem: -tendermint-rs `v0.17`, which brings the protobuf changes from tendermint `v0.34.0`, plus alignment with -the latest cosmos proto versions from `v0.40.0-rc5` (sometimes called 'stargate-5'). 
- -### FEATURES -- Update to tendermint-rs version `0.17` ([#451]) -- Update to cosmos-sdk IBC proto version `v0.40.0-rc5` ([#451]) - -- [ibc-relayer] - -- [ibc-relayer-cli] - - Packet CLIs for recv_packet ([#443]) - - Packet CLIs for acknowledging packets ([#468]) - -### IMPROVEMENTS -- [ibc-relayer] - - Mock chain (implementing IBC handlers) and integration against CLI ([#158]) - - Relayer tests for client update (ping pong) against MockChain ([#381]) - - Relayer refactor to improve testing and add semantic dependencies ([#447]) - -[#158]: https://github.com/informalsystems/ibc-rs/issues/158 -[#379]: https://github.com/informalsystems/ibc-rs/issues/379 -[#381]: https://github.com/informalsystems/ibc-rs/issues/381 -[#443]: https://github.com/informalsystems/ibc-rs/issues/443 -[#447]: https://github.com/informalsystems/ibc-rs/issues/447 -[#451]: https://github.com/informalsystems/ibc-rs/issues/451 -[#468]: https://github.com/informalsystems/ibc-rs/issues/468 - - -## v0.0.5 -*December 2, 2020* - -This release focuses on implementing relayer and relayer-cli functionality towards a full v0 implementation. -We now have the full-stack implementation for supporting client creation & updates, as well as connection- and channel handshakes. -We also consolidated our TLA+ specs into an "IBC Core TLA+ specification," and added ICS 020 spec. - -Special thanks to external contributors for this release: @CharlyCst ([#347], [#419]). 
- -- [ibc-relayer-cli] - - Add `--all` option to `light rm` command to remove all peers for a given chain ([#431]) - -[#431]: https://github.com/informalsystems/ibc-rs/issues/431 - -### FEATURES - -- Update to tendermint-rs version `0.17-RC3` ([#403]) -- [changelog] Added "unreleased" section in `CHANGELOG.MD` to help streamline releases ([#274]) -- [ibc] - - Implement flexible connection id selection ([#332]) - - ICS 4 Domain Types for channel handshakes and packets ([#315], [#95]) - - Introduce LightBlock support for MockContext ([#389]) -- [ibc-relayer] - - Retrieve account sequence information from a chain using a GRPC client (#337) - - Implementation of chain runtime for v0 ([#330]) - - Integrate relayer spike into ibc-relayer crate ([#335]) - - Implement `query_header_at_height` via plain RPC queries (no light client verification) ([#336]) - - Implement the relayer logic for connection handshake messages ([#358], [#359], [#360]) - - Implement the relayer logic for channel handshake messages ([#371], [#372], [#373], [#374]) -- [ibc-relayer-cli] - - Merge light clients config in relayer config and add commands to add/remove light clients ([#348]) - - CLI for client update message ([#277]) - - Implement the relayer CLI for connection handshake messages ([#358], [#359], [#360]) - - Implement the relayer CLI for channel handshake messages ([#371], [#372], [#373], [#374]) - - Added basic client, connection, and channel lifecyle in relayer v0 ([#376], [#377], [#378]) - - Implement commands to add and list keys for a chain ([#363]) - - Allow overriding of peer_id, height and hash in light add command ([#428]) -- [proto-compiler] - - Refactor and allow specifying a commit at which the Cosmos SDK should be checked out ([#366]) - - Add a `--tag` option to the `clone-sdk` command to check out a tag instead of a commit ([#369]) - - Fix `--out` command line parameter (instead of `--path`) ([#419]) -- [ibc/relayer-spec] - - ICS 020 spec in TLA+ ([#386]) - - Prepare IBC Core 
TLA+ specs ([#404]) - -### IMPROVEMENTS - -- [ibc-relayer] - - Pin chain runtime against Tokio 0.2 by downgrading for 0.3 to avoid dependency hell ([#415], follow up to [#402]) -- [ibc-relayer-cli] - - Split tasks spawned by CLI commands into their own modules ([#331]) - - V0 command implementation ([#346]) -- [ibc] - - Split `msgs.rs` of ICS002 in separate modules ([#367]) - - Fixed inconsistent versioning for ICS003 and ICS004 ([#97]) - - Fixed `get_sign_bytes` method for messages ([#98]) - - Homogenize ConnectionReader trait so that all functions return owned objects ([#347]) - - Align with tendermint-rs in the domain type definition of `block::Id` ([#338]) - - -[#95]: https://github.com/informalsystems/ibc-rs/issues/95 -[#97]: https://github.com/informalsystems/ibc-rs/issues/97 -[#98]: https://github.com/informalsystems/ibc-rs/issues/98 -[#274]: https://github.com/informalsystems/ibc-rs/issues/274 -[#277]: https://github.com/informalsystems/ibc-rs/issues/277 -[#315]: https://github.com/informalsystems/ibc-rs/issues/315 -[#330]: https://github.com/informalsystems/ibc-rs/issues/330 -[#332]: https://github.com/informalsystems/ibc-rs/issues/332 -[#335]: https://github.com/informalsystems/ibc-rs/pull/335 -[#336]: https://github.com/informalsystems/ibc-rs/issues/336 -[#337]: https://github.com/informalsystems/ibc-rs/issues/337 -[#338]: https://github.com/informalsystems/ibc-rs/issues/338 -[#346]: https://github.com/informalsystems/ibc-rs/issues/346 -[#347]: https://github.com/informalsystems/ibc-rs/issues/347 -[#348]: https://github.com/informalsystems/ibc-rs/pull/348 -[#358]: https://github.com/informalsystems/ibc-rs/issues/358 -[#359]: https://github.com/informalsystems/ibc-rs/issues/359 -[#360]: https://github.com/informalsystems/ibc-rs/issues/360 -[#363]: https://github.com/informalsystems/ibc-rs/issues/363 -[#366]: https://github.com/informalsystems/ibc-rs/issues/366 -[#367]: https://github.com/informalsystems/ibc-rs/issues/367 -[#368]: 
https://github.com/informalsystems/ibc-rs/issues/368 -[#369]: https://github.com/informalsystems/ibc-rs/pull/369 -[#371]: https://github.com/informalsystems/ibc-rs/issues/371 -[#372]: https://github.com/informalsystems/ibc-rs/issues/372 -[#373]: https://github.com/informalsystems/ibc-rs/issues/373 -[#374]: https://github.com/informalsystems/ibc-rs/issues/374 -[#376]: https://github.com/informalsystems/ibc-rs/issues/376 -[#377]: https://github.com/informalsystems/ibc-rs/issues/377 -[#378]: https://github.com/informalsystems/ibc-rs/issues/378 -[#386]: https://github.com/informalsystems/ibc-rs/issues/386 -[#389]: https://github.com/informalsystems/ibc-rs/issues/389 -[#402]: https://github.com/informalsystems/ibc-rs/issues/402 -[#403]: https://github.com/informalsystems/ibc-rs/issues/403 -[#404]: https://github.com/informalsystems/ibc-rs/issues/404 -[#419]: https://github.com/informalsystems/ibc-rs/issues/419 -[#415]: https://github.com/informalsystems/ibc-rs/issues/415 -[#428]: https://github.com/informalsystems/ibc-rs/issues/428 -[changelog]: https://github.com/informalsystems/ibc-rs/tree/master/CHANGELOG.md -[proto-compiler]: https://github.com/informalsystems/ibc-rs/tree/master/proto-compiler - -## v0.0.4 -*October 19, 2020* - -This release focuses on alignment with the Cosmos ecosystem: adaptations to Tendermint-rs 0.16 and subsequently to 0.17 (`0.17.0-rc1`), and numerous protobuf updates following latest stargate releases. - -Additional highlights: -- Adding DomainTypes and (de)serialization capability to ICS02 and ICS03 messages and structures. -- Improvements of the IBC message processor framework (handlers, contexts and mocks). -- Added initial implementations for the ICS26 (routing module) and ICS18 (basic relayer algorithms module) for use in testing. -- Also added support for packet handling in the relayer algorithm specifications. 
- -### BREAKING CHANGES: -- [ibc-relayer] & [ibc] Alignment with ecosystem updates: - - Compatibility with the latest protobuf (Gaia stargate-3 and stargate-4) ([#191], [#272], [#273], [#278]) - - Adaptations to tendermint 0.17 ([#286], [#293], [#300], [#302], [#308]) -- [ibc-relayer] UX improvement: Remove proof option from client connections command ([#205]) - -### FEATURES: -- [ibc/ics03] ICS03 Ack and Confirm message processors ([#223]) -- [ibc-relayer-cli] - - Relayer CLIs for client messages ([#207]) - - Relayer CLIs for connection-open-init ([#206]) - - Queries for consensus state and client state ([#149], [#150]) -- [ibc] Routing module minimal implementation for MVP ([#159], [#232]) -- [ibc/relayer-spec] Relayer specification for packet handling ([#229], [#234], [#237]) -- [ibc/relayer-spec] Basic packet handling in TLA+([#124]) -- [ibc] Basic relayer functionality: a test with ClientUpdate ping-pong between two mocked chains ([#276]) - -### IMPROVEMENTS: -- [ibc] Implemented the `DomainType` trait for IBC proto structures ([#245], [#249]). 
-- [ibc] & [ibc-proto] Several improvements to message processors, among which ([#218]): - - ICS03 connection handshake protocol initial implementation and tests ([#160]) - - Add capability to decode from protobuf Any* type into Tendermint and Mock client states - - Cleanup Any* client wrappers related code - - Migrate handlers to newer protobuf definitions ([#226]) - - Extend client context mock ([#221]) - - Context mock simplifications and cleanup ([#269], [#295], [#296], [#297]) -- [ibc/ics03] Split `msgs.rs` in multiple files, implement `From` for all messages ([#253]) -- [ibc-proto] - - Move ibc-proto source code into ibc-rs ([#142]) and fixed code deduplication ([#282], [#284]) - - Consolidate proto-compiler logic [#241] -- [ibc/relayer-spec] Add support for APALACHE to the Relayer TLA+ spec ([#165]) -- [ibc-relayer] Update to tendermint v.0.16 and integrate with the new light client implementation ([#90], [#243]) - -### BUG FIXES: -- [ibc] Removed "Uninitialized" state from connection ([#217]) -- [ibc-relayer-cli] Fix for client query subcommands ([#231]) -- [disclosure-log] & [spec/connection-handshake] Disclosed bugs in ICS3 version negotiation and proposed a fix ([#209], [#213]) - -[#90]: https://github.com/informalsystems/ibc-rs/issues/90 -[#124]: https://github.com/informalsystems/ibc-rs/issues/124 -[#142]: https://github.com/informalsystems/ibc-rs/issues/142 -[#149]: https://github.com/informalsystems/ibc-rs/issues/149 -[#150]: https://github.com/informalsystems/ibc-rs/issues/150 -[#159]: https://github.com/informalsystems/ibc-rs/issues/159 -[#160]: https://github.com/informalsystems/ibc-rs/issues/160 -[#165]: https://github.com/informalsystems/ibc-rs/issues/165 -[#191]: https://github.com/informalsystems/ibc-rs/issues/191 -[#205]: https://github.com/informalsystems/ibc-rs/issues/205 -[#206]: https://github.com/informalsystems/ibc-rs/issues/206 -[#207]: https://github.com/informalsystems/ibc-rs/issues/207 -[#209]: 
https://github.com/informalsystems/ibc-rs/issues/209 -[#213]: https://github.com/informalsystems/ibc-rs/issues/213 -[#217]: https://github.com/informalsystems/ibc-rs/issues/217 -[#218]: https://github.com/informalsystems/ibc-rs/issues/218 -[#221]: https://github.com/informalsystems/ibc-rs/issues/221 -[#223]: https://github.com/informalsystems/ibc-rs/issues/223 -[#226]: https://github.com/informalsystems/ibc-rs/issues/226 -[#229]: https://github.com/informalsystems/ibc-rs/issues/229 -[#231]: https://github.com/informalsystems/ibc-rs/issues/231 -[#232]: https://github.com/informalsystems/ibc-rs/issues/232 -[#234]: https://github.com/informalsystems/ibc-rs/issues/234 -[#237]: https://github.com/informalsystems/ibc-rs/issues/237 -[#241]: https://github.com/informalsystems/ibc-rs/issues/241 -[#243]: https://github.com/informalsystems/ibc-rs/issues/243 -[#245]: https://github.com/informalsystems/ibc-rs/issues/245 -[#249]: https://github.com/informalsystems/ibc-rs/issues/249 -[#253]: https://github.com/informalsystems/ibc-rs/issues/253 -[#269]: https://github.com/informalsystems/ibc-rs/issues/269 -[#272]: https://github.com/informalsystems/ibc-rs/issues/272 -[#273]: https://github.com/informalsystems/ibc-rs/issues/273 -[#276]: https://github.com/informalsystems/ibc-rs/issues/276 -[#278]: https://github.com/informalsystems/ibc-rs/issues/278 -[#282]: https://github.com/informalsystems/ibc-rs/issues/282 -[#284]: https://github.com/informalsystems/ibc-rs/issues/284 -[#286]: https://github.com/informalsystems/ibc-rs/issues/286 -[#293]: https://github.com/informalsystems/ibc-rs/issues/293 -[#295]: https://github.com/informalsystems/ibc-rs/issues/295 -[#296]: https://github.com/informalsystems/ibc-rs/issues/296 -[#297]: https://github.com/informalsystems/ibc-rs/issues/297 -[#300]: https://github.com/informalsystems/ibc-rs/issues/300 -[#302]: https://github.com/informalsystems/ibc-rs/issues/302 -[#308]: https://github.com/informalsystems/ibc-rs/issues/308 -[ibc-proto]: 
https://github.com/informalsystems/ibc-rs/tree/master/proto -[disclosure-log]: https://github.com/informalsystems/ibc-rs/blob/master/docs/disclosure-log.md -[spec/connection-handshake]: https://github.com/informalsystems/ibc-rs/tree/master/docs/spec/connection-handshake -[ibc-relayer]: https://github.com/informalsystems/ibc-rs/tree/master/relayer - -## v0.0.3 -*September 1, 2020* - -This release focuses on the IBC message processor framework and initial -implementations in ICS02 and ICS07. It also introduces an initial specification for the relayer algorithm. - -Other highlights: -- The ibc crate is published as [ibc](https://crates.io/crates/ibc) in crates.io -- ADR-001 and ADR-003 are complete. 🎉 - -### BREAKING CHANGES: -- [ibc] Renamed `modules` crate to `ibc` crate. Version number for the new crate is not reset. ([#198]) -- [ibc/ics02] `ConnectionId`s are now decoded to `Vec` and validated instead of `Vec` ([#185]) -- [ibc/ics03] Removed `Connection` and `ConnectionCounterparty` traits ([#193]) -- [ibc/ics04] Removed `Channel` and `ChannelCounterparty` traits ([#192]) - -### FEATURES: -- [ibc/ics02] partial implementation of message handler ([#119], [#194]) -- [ibc/ics07] partial implementation of message handler ([#119], [#194]) -- [architecture/ADR-003] Proposal for IBC handler (message processor) architecture ([#119], [#194]) -- [ibc/relayer-spec] Detailed technical specification of the relayer algorithm with focus on client update ([#84]) -- [architecture/ADR-001] Documentation for the repository structure ([#1]) -- [architecture/FSM-1] Connection Handshake FSM English description ([#122]) - -### IMPROVEMENTS: -- [contributing] Updated CONTRIBUTING.md. 
Please read before opening PRs ([#195]) -- [ibc-relayer-cli] Refactor ConnectionId decoding in `query client` ([#185]) - -### BUG FIXES: -- [ibc/ics24] Identifiers limit update according to ICS specs ([#168]) - -[ibc/relayer-spec]: https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/relayer/Relayer.md -[#84]: https://github.com/informalsystems/ibc-rs/issues/84 -[architecture/ADR-001]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-001-repo.md -[#1]: https://github.com/informalsystems/ibc-rs/issues/1 -[contributing]: https://github.com/informalsystems/ibc-rs/blob/master/CONTRIBUTING.md -[#195]: https://github.com/informalsystems/ibc-rs/pull/195 -[ibc]: https://github.com/informalsystems/ibc-rs/tree/master/modules -[#198]: https://github.com/informalsystems/ibc-rs/issues/198 -[ibc/ics02]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics02_client -[#185]: https://github.com/informalsystems/ibc-rs/issues/185 -[ibc/ics03]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics03_connection -[#193]: https://github.com/informalsystems/ibc-rs/issues/193 -[ibc/ics04]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics04_channel -[#192]: https://github.com/informalsystems/ibc-rs/issues/192 -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli -[architecture/FSM-1]: https://github.com/informalsystems/ibc-rs/blob/v0.1.0/docs/architecture/fsm-async-connection.md -[#122]: https://github.com/informalsystems/ibc-rs/issues/122 -[architecture/ADR-003]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-003-handler-implementation.md -[#119]: https://github.com/informalsystems/ibc-rs/issues/119 -[#194]: https://github.com/informalsystems/ibc-rs/issues/194 -[ibc/ics24]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics24_host -[#168]: https://github.com/informalsystems/ibc-rs/issues/168 
-[ibc/ics07]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/clients/ics07_tendermint - -## v0.0.2 - -*August 1, 2020* - -This release is focused on updating the query system from amino to protobuf, -implementing a few queries from the CLI, and establishing an initial testing framework -that will support multiple chain types. - -It does not target a stable release of Cosmos-SDK chains, but is tracking -the latest state of development towards the Cosmos-SDK Stargate release. - -### BREAKING CHANGES: - -- [ibc|ibc-relayer] Refactor queries, paths, and Chain trait to reduce code and use - protobuf instead of Amino. - [\#152](https://github.com/informalsystems/ibc-rs/pull/152), - [\#174](https://github.com/informalsystems/ibc-rs/pull/174), - [\#155](https://github.com/informalsystems/ibc-rs/pull/155) -- [repo] Moved relayer/cli to relayer-cli, relayer/relay to relayer. [\#183](https://github.com/informalsystems/ibc-rs/pull/183) - -### FEATURES: - -- [ibc-relayer] Query connections given client id. [\#169](https://github.com/informalsystems/ibc-rs/pull/169) -- [ibc-relayer] Query connection given connection id. [\#136](https://github.com/informalsystems/ibc-rs/pull/136) -- [ibc-relayer] Query channel given channel id and port [\#163](https://github.com/informalsystems/ibc-rs/pull/163) -- [spec] Channel closing datagrams in TLA+ [\#141](https://github.com/informalsystems/ibc-rs/pull/141) - -### IMPROVEMENTS: - -- [ci] Framework (scripts and Github Actions) for integration testing the relayer queries against - the Cosmos-SDK's `simd` binary with prepopulated IBC state in the genesis - [\#140](https://github.com/informalsystems/ibc-rs/pull/140), - [\#184](https://github.com/informalsystems/ibc-rs/pull/184) -- [ibc-relayer|ibc] Implemented better Raw type handling. [\#156](https://github.com/informalsystems/ibc-rs/pull/156) -- [repo] Add rust-toolchain file. 
[\#154](https://github.com/informalsystems/ibc-rs/pull/154) - -### BUG FIXES: - -- [ibc] Fixed the identifiers limits according to updated ics spec. [\#189](https://github.com/informalsystems/ibc-rs/pull/189) -- [ibc/relayer] Remove some warnings triggered during compilation due to dependency specification. [\#132](https://github.com/informalsystems/ibc-rs/pull/132) -- [ibc] Fix nightly runs. [\#161](https://github.com/informalsystems/ibc-rs/pull/161) -- [repo] Fix for incomplete licence terms. [\#153](https://github.com/informalsystems/ibc-rs/pull/153) - -## 0.0.1 - -*July 1st, 2020* - -This is the initial prototype release of an IBC relayer and TLA+ specifications. -There are no compatibility guarantees until v0.1.0. - -Includes: - -- Configuration file definition and validation -- Client state, consensus state, connection, channel queries. - - Note: deserialization is unimplemented as it has dependency on migration to protobuf for ABCI queries -- Per chain light clients threads are created and headers are periodically retrieved and verified. -- Per chain IBC event monitor threads are spawned and main event handler that receives them. - - Note: the event handler just displays the events. -- IBC Modules partial implementation for datastructures, messages and queries. -- Some English and TLA+ specifications for Connection & Channel Handshake as well as naive relayer algorithm. diff --git a/.changelog/unreleased/.gitkeep b/.changelog/unreleased/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md b/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md deleted file mode 100644 index 28e9d3a4d8..0000000000 --- a/.changelog/unreleased/breaking-changes/ibc-relayer-cli/1075-change-key-name-flag.md +++ /dev/null @@ -1 +0,0 @@ -- Merged commands `keys add` and `keys restore` into single command `keys add`. 
The flag to specify the key name for the CLI command `keys add` has been changed from `-n` to `-k`. Restoring a key now takes a file containing the mnemonic as input instead of directly taking the mnemonic. ([#1075](https://github.com/informalsystems/ibc-rs/issues/1075)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md b/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md deleted file mode 100644 index 6ea31939c6..0000000000 --- a/.changelog/unreleased/bug-fixes/ibc-relayer-cli/2143-config-validate-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- CLI command `config validate` now correctly outputs an error if the configuration file - does not exist or is empty. ([#2143](https://github.com/informalsystems/ibc-rs/issues/2143)) \ No newline at end of file diff --git a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md b/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md deleted file mode 100644 index 41a113d0e8..0000000000 --- a/.changelog/unreleased/bug-fixes/ibc-relayer/1153-fix-execute-schedule-leaky-pipeline.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix `execute_schedule` method dropping operational data due to improper - handling of errors. ([#2118](https://github.com/informalsystems/ibc-rs/issues/1153)) diff --git a/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md b/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md deleted file mode 100644 index a3dc964f4c..0000000000 --- a/.changelog/unreleased/bug-fixes/relayer-cli/2168-conn-handshake-retry.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix the flow for crate connection to ensure success - despite concurrent relayers racing to finish the handshake. 
- ([#2168](https://github.com/informalsystems/ibc-rs/issues/2168)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md deleted file mode 100644 index 29b56f825b..0000000000 --- a/.changelog/unreleased/features/ibc-relayer-cli/912-balance-subcommand-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added CLI command `keys balance` which outputs the balance of an account associated with a - key. ([#912](https://github.com/informalsystems/ibc-rs/issues/912)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md b/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md deleted file mode 100644 index 6daf2f0af0..0000000000 --- a/.changelog/unreleased/features/ibc-relayer-cli/999-channel-client-subcommand-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added CLI command `query channel client` which outputs the channel's client state. - ([#999](https://github.com/informalsystems/ibc-rs/issues/999)) \ No newline at end of file diff --git a/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md b/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md deleted file mode 100644 index 682f0a6588..0000000000 --- a/.changelog/unreleased/features/ibc-relayer/2240-chain-types.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add preliminary support for multiple chain types, which can be specified in - the chain configuration. At the moment only the `CosmosSdk` chain type is - supported. 
([#2240](https://github.com/informalsystems/ibc-rs/issues/2240)) \ No newline at end of file diff --git a/.changelog/unreleased/features/proto/2277-proto-server.md b/.changelog/unreleased/features/proto/2277-proto-server.md deleted file mode 100644 index 723bb9ebb9..0000000000 --- a/.changelog/unreleased/features/proto/2277-proto-server.md +++ /dev/null @@ -1,2 +0,0 @@ -- Generate gRPC server code under feature 'server' - ([#2277](https://github.com/informalsystems/ibc-rs/issues/2277)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md b/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md deleted file mode 100644 index ed6d1c495e..0000000000 --- a/.changelog/unreleased/improvements/ibc-relayer/1400-fee-related-error-message.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added handler for SDK Err(13) in order to output an understanble error - message. ([#1400](https://github.com/informalsystems/ibc-rs/issues/1400)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md b/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md deleted file mode 100644 index 8d7cbbbbad..0000000000 --- a/.changelog/unreleased/improvements/ibc/1759-complete-ics20.md +++ /dev/null @@ -1 +0,0 @@ -- Complete ICS20 implementation ([#1759](https://github.com/informalsystems/ibc-rs/issues/1759)) diff --git a/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md b/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md deleted file mode 100644 index df7817429a..0000000000 --- a/.changelog/unreleased/improvements/relayer/2223-consolidate-chain-query-proven.md +++ /dev/null @@ -1,2 +0,0 @@ -- Consolidate ChainEndpoint::proven_* methods with their corresponding query_*() - form ([#2223](https://github.com/informalsystems/ibc-rs/issues/2223)) \ No newline at end of file diff --git 
a/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md b/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md deleted file mode 100644 index 398ea568f3..0000000000 --- a/.changelog/v0.10.0/breaking-changes/1660-msrv-1.57.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.57 - ([#1660](https://github.com/informalsystems/ibc-rs/issues/1660)) diff --git a/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md b/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md deleted file mode 100644 index 9ffadf79d6..0000000000 --- a/.changelog/v0.10.0/breaking-changes/1665-tendermint-0.23.2.md +++ /dev/null @@ -1,2 +0,0 @@ -- Pin tendermint-rs dependencies to =0.23.2 - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)) diff --git a/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md b/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md deleted file mode 100644 index aab7c64a3e..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc-relayer/1656-supervisor-spawn.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve spawning of supervisor worker tasks ([#1656](https://github.com/informalsystems/ibc-rs/issues/1656)) - - The `Supervisor` struct is removed. - - Supervisor is now spawned using the `spawn_supervisor` function. diff --git a/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md b/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md deleted file mode 100644 index 544d3c083a..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc/1618-get-frozen-height.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add the `frozen_height()` method to the `ClientState` trait. (Includes breaking changes to the Tendermint - `ClientState` API.) 
- ([#1618](https://github.com/informalsystems/ibc-rs/issues/1618)) diff --git a/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md b/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md deleted file mode 100644 index c58260ee02..0000000000 --- a/.changelog/v0.10.0/breaking-changes/ibc/1665-remove-chrono.md +++ /dev/null @@ -1,4 +0,0 @@ -- Remove `Timestamp` API that depended on the `chrono` crate: - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)): - - `Timestamp::from_datetime`; use `From` - - `Timestamp::as_datetime`, superseded by `Timestamp::into_datetime` diff --git a/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md b/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md deleted file mode 100644 index 6a7bf3d093..0000000000 --- a/.changelog/v0.10.0/bug-fixes/1264-recover-acct-seq.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added a recovery mechanism to automatically retry or drop tx upon account - sequence mismatch errors ([#1264](https://github.com/informalsystems/ibc-rs/issues/1264)) diff --git a/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md b/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md deleted file mode 100644 index e259131b17..0000000000 --- a/.changelog/v0.10.0/bug-fixes/1634-update-unclog-instructions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update `CONTRIBUTING.md` for latest version of unclog - ([#1634](https://github.com/informalsystems/ibc-rs/issues/1634)) \ No newline at end of file diff --git a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md deleted file mode 100644 index 8af52818cd..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1664-handle-expired-client.md +++ /dev/null @@ -1 +0,0 @@ -- Handle expired client errors in workers ([#1543](https://github.com/informalsystems/ibc-rs/issues/1543)) diff --git 
a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md deleted file mode 100644 index 77aabfddad..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1715-execute-schedule-after-packet-cmd.md +++ /dev/null @@ -1 +0,0 @@ -- Perform `execute_schedule` after handling packet commands in packet worker ([#1715](https://github.com/informalsystems/ibc-rs/issues/1715)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md b/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md deleted file mode 100644 index 8c6ad062d1..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc-relayer/1750-misbehavior-config.md +++ /dev/null @@ -1 +0,0 @@ -- Do not spawn detect misbehavior task if it is disabled in config [#1750](https://github.com/informalsystems/ibc-rs/issues/1750) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md b/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md deleted file mode 100644 index d8d3dc8a39..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1573-delete-commitment-in-acknowledgePacket.md +++ /dev/null @@ -1,2 +0,0 @@ -- Delete packet commitment instead of acknowledgement in acknowledgePacket - [#1573](https://github.com/informalsystems/ibc-rs/issues/1573) \ No newline at end of file diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md b/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md deleted file mode 100644 index 62eb667104..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1649-fix-chan-open-ack-verify.md +++ /dev/null @@ -1,2 +0,0 @@ -- Set the `counterparty_channel_id` correctly to fix ICS04 [`chanOpenAck` handler verification](https://github.com/informalsystems/ibc-rs/blob/master/modules/src/core/ics04_channel/handler/chan_open_ack.rs) - 
([#1649](https://github.com/informalsystems/ibc-rs/issues/1649)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md b/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md deleted file mode 100644 index efb81f0339..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1697-assert-non-zero-trust-level.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add missing assertion for non-zero trust-level in Tendermint client initialization. - ([#1697](https://github.com/informalsystems/ibc-rs/issues/1697)) diff --git a/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md b/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md deleted file mode 100644 index a8938f9d36..0000000000 --- a/.changelog/v0.10.0/bug-fixes/ibc/1710-fix-frozen-height-proto-conv.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix conversion to Protocol Buffers of `ClientState`'s `frozen_height` field. - ([#1710](https://github.com/informalsystems/ibc-rs/issues/1710)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/1410-dynamic-version.md b/.changelog/v0.10.0/features/1410-dynamic-version.md deleted file mode 100644 index 1ed737d8e3..0000000000 --- a/.changelog/v0.10.0/features/1410-dynamic-version.md +++ /dev/null @@ -1,2 +0,0 @@ -- Support dynamic versions in channel open handshake & enable full support for - ibc-go v2 ([#1410](https://github.com/informalsystems/ibc-rs/issues/1410)) diff --git a/.changelog/v0.10.0/features/1550-ci-gaiav6.md b/.changelog/v0.10.0/features/1550-ci-gaiav6.md deleted file mode 100644 index 6215a94ea8..0000000000 --- a/.changelog/v0.10.0/features/1550-ci-gaiav6.md +++ /dev/null @@ -1 +0,0 @@ -- Extend CI test suite to include E2E tests using Gaia v6.0.0 [#1550](https://github.com/informalsystems/ibc-rs/issues/1550) diff --git a/.changelog/v0.10.0/features/1606.md b/.changelog/v0.10.0/features/1606.md deleted file mode 100644 index 88dc7212b3..0000000000 --- a/.changelog/v0.10.0/features/1606.md +++ 
/dev/null @@ -1,2 +0,0 @@ -- Added the `extra_wallets` parameter to `gm` to create additional funded wallets. -- Added the possibility of JSON output to `gm` by setting the environment variable `OUTPUT=json`. diff --git a/.changelog/v0.10.0/features/1633-allow-fee-granters.md b/.changelog/v0.10.0/features/1633-allow-fee-granters.md deleted file mode 100644 index 9c8344f7cf..0000000000 --- a/.changelog/v0.10.0/features/1633-allow-fee-granters.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added support for fee granters through config file - ([#1633](https://github.com/informalsystems/ibc-rs/issues/1633)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md b/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md deleted file mode 100644 index a9d4447b5e..0000000000 --- a/.changelog/v0.10.0/features/ibc-relayer/1561-config-proof-specs.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow custom proof-specs in chain config - ([#1561](https://github.com/informalsystems/ibc-rs/issues/1561)) \ No newline at end of file diff --git a/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md b/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md deleted file mode 100644 index 5f11ff8e2f..0000000000 --- a/.changelog/v0.10.0/features/ibc/1583-module-verification-ICS07.md +++ /dev/null @@ -1,3 +0,0 @@ -- Implement proof verification for Tendermint client (ICS07). 
- ([#1583](https://github.com/informalsystems/ibc-rs/pull/1583)) - diff --git a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md b/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md deleted file mode 100644 index eba09eba33..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1063-event-monitor-on-demand.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of standalone commands by starting the event monitor on-demand - ([#1063](https://github.com/informalsystems/ibc-rs/issues/1063)) diff --git a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md b/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md deleted file mode 100644 index 5c333db4f0..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer-cli/1636.md +++ /dev/null @@ -1,2 +0,0 @@ -- Increase the default for `max_gas` from `300_000` to `400_000` ([#1636](https://github.com/informalsystems/ibc-rs/pull/1636)) - diff --git a/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md b/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md deleted file mode 100644 index 31d107373e..0000000000 --- a/.changelog/v0.10.0/improvements/ibc-relayer/1576-update-abscissa.md +++ /dev/null @@ -1,6 +0,0 @@ -- Update to abscissa framework version 0.6.0-beta.1, adding support for - `--help` flags in subcommands and improving help and usage printouts. - The `--version` option of the `create channel` subcommand has been renamed - to `--channel-version`, with the old name still supported as an alias. 
- ([#1576](https://github.com/informalsystems/ibc-rs/pull/1576), - [#1743](https://github.com/informalsystems/ibc-rs/pull/1743)) diff --git a/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md b/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md deleted file mode 100644 index 477fa06f2d..0000000000 --- a/.changelog/v0.10.0/improvements/ibc/1665-remove-chrono.md +++ /dev/null @@ -1,4 +0,0 @@ -- More conventional ad-hoc conversion methods on `Timestamp` - ([#1665](https://github.com/informalsystems/ibc-rs/pull/1665)): - - `Timestamp::nanoseconds` replaces `Timestamp::as_nanoseconds` - - `Timestamp::into_datetime` substitutes `Timestamp::as_datetime` diff --git a/.changelog/v0.10.0/summary.md b/.changelog/v0.10.0/summary.md deleted file mode 100644 index 7d8ccc26c6..0000000000 --- a/.changelog/v0.10.0/summary.md +++ /dev/null @@ -1,16 +0,0 @@ -*January 13th, 2021* - -This release notably updates the underlying CLI framework (`abscissa`) to version 0.6.0-beta.1, -which now uses `clap` for parsing command line arguments. This substantially improves the UX of the CLI, -by adding support for `--help` flags in subcommands and improving help and usage printouts. - -The `--version` option of the `create channel` subcommand has been renamed -to `--channel-version`, with the old name still supported as an alias. -Additionally, the `-h` short flag on many commands is now `-H` to avoid -clashes with the clap-provided short flag for help. - -This release also improves the handling of account sequence mismatch errors, -with a recovery mechanism to automatically retry or drop tx upon such errors. - -The relayer now also supports dynamic versions in channel open handshake (which is needed by Interchain Accounts), and enables full support for IBC v2. 
- diff --git a/.changelog/v0.11.0/1749-build-aarch64.md b/.changelog/v0.11.0/1749-build-aarch64.md deleted file mode 100644 index 5039c18a3f..0000000000 --- a/.changelog/v0.11.0/1749-build-aarch64.md +++ /dev/null @@ -1,2 +0,0 @@ -- Hermes builds for Linux on AArch64 are now released. - ([#1749](https://github.com/informalsystems/ibc-rs/pull/1749)) diff --git a/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md b/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md deleted file mode 100644 index 40954b86a6..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1612-ibc-clock.md +++ /dev/null @@ -1 +0,0 @@ -- Hide `ibc::Timestamp::now()` behind `clock` feature flag (#1612)[https://github.com/informalsystems/ibc-rs/issues/1612] diff --git a/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md b/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md deleted file mode 100644 index eafdc89f88..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1765-msrv-1.58.md +++ /dev/null @@ -1 +0,0 @@ -- Update MSRV to Rust 1.58 ([#1765](https://github.com/informalsystems/ibc-rs/issues/1765)) diff --git a/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md b/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md deleted file mode 100644 index 4dff907825..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1767-tendermint-rs-0.23.5.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update tendermint-rs dependencies to 0.23.5 - ([#1767](https://github.com/informalsystems/ibc-rs/issues/1767)) \ No newline at end of file diff --git a/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md b/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md deleted file mode 100644 index a659b122f7..0000000000 --- a/.changelog/v0.11.0/breaking-changes/1817-remove-filter-option.md +++ /dev/null @@ -1,2 +0,0 @@ -- Remove `mode.packets.filter` config option and enable filtering by default - ([#1817](https://github.com/informalsystems/ibc-rs/issues/1817)) \ No newline at 
end of file diff --git a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md b/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md deleted file mode 100644 index a4ce9eedcb..0000000000 --- a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1662-configurable-upgrade-denom.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added a `denom` member to `upgrade_chain::UpgradePlanOptions`. - ([#1662](https://github.com/informalsystems/ibc-rs/issues/1662)) diff --git a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md b/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md deleted file mode 100644 index a57a9ca5b8..0000000000 --- a/.changelog/v0.11.0/breaking-changes/ibc-relayer/1807-foreign-client-create-params.md +++ /dev/null @@ -1,4 +0,0 @@ -- `foreign_client::CreateParams` struct added, passed as the parameter to - `ForeignClient::build_create_client` and - `ForeignClient::build_create_client_and_send`. 
- ([#1807](https://github.com/informalsystems/ibc-rs/pull/1807)) diff --git a/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md b/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md deleted file mode 100644 index 0c8103df5f..0000000000 --- a/.changelog/v0.11.0/bug-fixes/ibc/1745-fix-consensus-proof-verification.md +++ /dev/null @@ -1 +0,0 @@ -- Verify the client consensus proof against the client's consensus state root and not the host's state root diff --git a/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md b/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md deleted file mode 100644 index c32fbad959..0000000000 --- a/.changelog/v0.11.0/bug-fixes/ibc/1763-init-consensus-meta-on-client-create.md +++ /dev/null @@ -1,2 +0,0 @@ -- Initialize consensus metadata on client creation - ([#1763](https://github.com/informalsystems/ibc-rs/issues/1763)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/1536-fast-start.md b/.changelog/v0.11.0/improvements/1536-fast-start.md deleted file mode 100644 index 4141495364..0000000000 --- a/.changelog/v0.11.0/improvements/1536-fast-start.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve startup time of the relayer - - When scanning a chain with filtering enabled and an allow list, skip scanning all the clients and query the allowed channels directly. This results in much fewer queries and a faster start. - - Add a `--full-scan` option to `hermes start` to opt out of the fast start mechanism and do a full scan. diff --git a/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md b/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md deleted file mode 100644 index df22ac3c78..0000000000 --- a/.changelog/v0.11.0/improvements/1641-tendermint-0.23.4.md +++ /dev/null @@ -1,3 +0,0 @@ -- Update `tendermint-rs` to v0.23.4 and harmonize the dependencies to use a single TLS stack. 
- A system installation of OpenSSL is no longer required to build Hermes. - ([#1641](https://github.com/informalsystems/ibc-rs/issues/1641)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md b/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md deleted file mode 100644 index beb9510f9c..0000000000 --- a/.changelog/v0.11.0/improvements/1687-remove-mock-sleep.md +++ /dev/null @@ -1 +0,0 @@ -- Remove 1 second sleep in `generate_tm_block` during testing with mock context. [#1687](https://github.com/informalsystems/ibc-rs/issues/1687) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md deleted file mode 100644 index 324bb40250..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1662-configurable-upgrade-denom.md +++ /dev/null @@ -1,2 +0,0 @@ -- Make the deposit denomination configurable in `tx raw upgrade-chain` via a new `--denom` flag. - ([#1662](https://github.com/informalsystems/ibc-rs/issues/1662)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md deleted file mode 100644 index 75f584a3e8..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1777-update-abscissa-and-clap.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update to abscissa_core 0.6.0-rc.0 and clap 3.x - ([#1777](https://github.com/informalsystems/ibc-rs/pull/1777)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md deleted file mode 100644 index 7d46b207e3..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/1789-cli-completions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add `completions` CLI command to generate shell auto-completion scripts. 
- ([#1789](https://github.com/informalsystems/ibc-rs/pull/1789)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md b/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md deleted file mode 100644 index 635234e9aa..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer-cli/836-create-client-options.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add custom options to the `create client` command. - ([#836](https://github.com/informalsystems/ibc-rs/issues/836)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md b/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md deleted file mode 100644 index 743dc14e31..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1481-chainendpoint-any-consensus-state.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow `ChainEndpoint` implementations to fetch any types of clients - and consensus states ([#1481](https://github.com/informalsystems/ibc-rs/issues/1481)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md b/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md deleted file mode 100644 index 1f1ae711e9..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1491-structured-logs.md +++ /dev/null @@ -1,2 +0,0 @@ -- More structural logging in relayer, using tracing spans and key-value pairs. - ([#1491](https://github.com/informalsystems/ibc-rs/pull/1491)) diff --git a/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md b/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md deleted file mode 100644 index 94e4e72be4..0000000000 --- a/.changelog/v0.11.0/improvements/ibc-relayer/1785-clarify-ethermint-keys.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improved documention w.r.t. 
keys for Ethermint-based chains - ([#1785](https://github.com/informalsystems/ibc-rs/issues/1785)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md b/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md deleted file mode 100644 index 3045efb67e..0000000000 --- a/.changelog/v0.11.0/improvements/ibc/1760-path-variants-as-types.md +++ /dev/null @@ -1,2 +0,0 @@ -- Extract all `ics24_host::Path` variants into their separate types - ([#1760](https://github.com/informalsystems/ibc-rs/issues/1760)) \ No newline at end of file diff --git a/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md b/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md deleted file mode 100644 index d59818fd55..0000000000 --- a/.changelog/v0.11.0/improvements/ibc/1761-disallow-empty-commitment-prefix-and-proof.md +++ /dev/null @@ -1,2 +0,0 @@ -- Disallow empty `CommitmentPrefix` and `CommitmentProofBytes` - ([#1761](https://github.com/informalsystems/ibc-rs/issues/1761)) \ No newline at end of file diff --git a/.changelog/v0.11.0/summary.md b/.changelog/v0.11.0/summary.md deleted file mode 100644 index 8a19f4003f..0000000000 --- a/.changelog/v0.11.0/summary.md +++ /dev/null @@ -1,36 +0,0 @@ -This release notably speeds up the startup time of Hermes, -adds options to the `create client` command to customize the client parameters, -makes the deposit denomination configurable in `tx raw upgrade-chain` via a new `--denom` flag, -and adds a `completions` CLI command to generate shell auto-completion scripts. - -### Note for operators - -This release includes a breaking change, which requires the configuration file to be edited. -The `mode.packets.filter` configuration option has been removed and is now enabled by default. - -Before running Hermes v0.11.0, make sure you remove the `mode.packets.filter` option from the configuration file. 
- -```diff ---- a/config.toml -+++ b/config.toml -@@ -50,18 +50,6 @@ clear_interval = 100 - # Whether or not to clear packets on start. [Default: false] - clear_on_start = true - --# Enable or disable the filtering mechanism. --# Valid options are 'true', 'false'. --# Currently Hermes supports two filters: --# 1. Packet filtering on a per-chain basis; see the chain-specific --# filter specification below in [chains.packet_filter]. --# 2. Filter for all activities based on client state trust threshold; this filter --# is parametrized with (numerator = 1, denominator = 3), so that clients with --# thresholds different than this will be ignored. --# If set to 'true', both of the above filters will be enabled. --# [Default: false] --filter = false -- - # Toggle the transaction confirmation mechanism. - # The tx confirmation mechanism periodically queries the `/tx_search` RPC - # endpoint to check that previously-submitted transactions -``` - diff --git a/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md b/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md deleted file mode 100644 index 36eed16cc4..0000000000 --- a/.changelog/v0.11.1/bug-fixes/ibc-relayer-cli/1822-skip-config-for-completions.md +++ /dev/null @@ -1,2 +0,0 @@ -- Do not require a config file to be present for the `completions` command. - ([#1822](https://github.com/informalsystems/ibc-rs/pull/1822)) diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md b/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md deleted file mode 100644 index be5a15dae4..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1389-add-connection-handshake-verification-logic.md +++ /dev/null @@ -1 +0,0 @@ -- Add missing checks for `ConnectionEnd::version` and `Counterparty::prefix` fields in the `check_destination_connection_state` method. 
([#1389](https://github.com/informalsystems/ibc-rs/issues/1389)) diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md b/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md deleted file mode 100644 index d9da56b8c7..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1663-pending-timeout.md +++ /dev/null @@ -1,2 +0,0 @@ -- Increased tx confirmation timeout to 300s to prevent aggressive tx - resubmission ([#1663](https://github.com/informalsystems/ibc-rs/issues/1663)) \ No newline at end of file diff --git a/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md b/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md deleted file mode 100644 index 369f3c51da..0000000000 --- a/.changelog/v0.11.1/improvements/ibc-relayer/1793-begin-end-block-chan-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle channel events originating from Tendermint ABCI's BeginBlock and EndBlock methods - ([#1793](https://github.com/informalsystems/ibc-rs/issues/1793)) \ No newline at end of file diff --git a/.changelog/v0.11.1/summary.md b/.changelog/v0.11.1/summary.md deleted file mode 100644 index 5a5f1350b1..0000000000 --- a/.changelog/v0.11.1/summary.md +++ /dev/null @@ -1 +0,0 @@ -This release adds support for channel events originating from Tendermint ABCI's `BeginBlock` and `EndBlock` methods. diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md deleted file mode 100644 index 47df3b67a2..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer-cli/1885-disable-config-reload.md +++ /dev/null @@ -1 +0,0 @@ -- Disable reloading of configuration upon receiving a SIGHUP signal. 
([#1885](https://github.com/informalsystems/ibc-rs/issues/1885)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md deleted file mode 100644 index 758a217a66..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1837-non-standard-ports.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle non-standard ports in channel handshake - ([#1837](https://github.com/informalsystems/ibc-rs/issues/1837)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md deleted file mode 100644 index 88cafb5b79..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1844-duplicate-send-packet-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix duplicate SendPacket events emitted by EndBlock - ([#1844](https://github.com/informalsystems/ibc-rs/issues/1844)) \ No newline at end of file diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md deleted file mode 100644 index 96ab22af07..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1861-non-standard-ports.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix support for non-standard ports in channel handshake - ([#1861](https://github.com/informalsystems/ibc-rs/issues/1861), - [#1837](https://github.com/informalsystems/ibc-rs/issues/1837)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md b/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md deleted file mode 100644 index 4a72814e75..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc-relayer/1872-clear-packets.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed bug where Hermes cleared packets at startup, despite - `clear_on_start = false` ([#1872](https://github.com/informalsystems/ibc-rs/issues/1872)) diff --git 
a/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md b/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md deleted file mode 100644 index ad064f0e4c..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc/1706-fix-formatting-for-some-tendermint-errors.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fixed the formatting of NotEnoughTimeElapsed and NotEnoughBlocksElapsed - in tendermint errors - ([#1706](https://github.com/informalsystems/ibc-rs/issues/1706)) diff --git a/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md b/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md deleted file mode 100644 index 821801926b..0000000000 --- a/.changelog/v0.12.0/bug-fixes/ibc/1770-deterministic-host-timestamp.md +++ /dev/null @@ -1,2 +0,0 @@ -- IBC handlers now retrieve the host timestamp from the latest host consensus - state ([#1770](https://github.com/informalsystems/ibc-rs/issues/1770)) \ No newline at end of file diff --git a/.changelog/v0.12.0/features/1797-ibc-v3.md b/.changelog/v0.12.0/features/1797-ibc-v3.md deleted file mode 100644 index fbc07d90ff..0000000000 --- a/.changelog/v0.12.0/features/1797-ibc-v3.md +++ /dev/null @@ -1,2 +0,0 @@ -- Upgrade protos and compatibility to IBC v3.0.0-rc.0 and Cosmos SDK v0.45.1 - ([#1797](https://github.com/informalsystems/ibc-rs/issues/1797)) diff --git a/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md b/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md deleted file mode 100644 index 337b05d53a..0000000000 --- a/.changelog/v0.12.0/features/ibc-relayer-cli/1895-rust-log.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow overriding the tracing filter with `RUST_LOG` environment variable - ([#1895](https://github.com/informalsystems/ibc-rs/issues/1895)) \ No newline at end of file diff --git a/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md 
b/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md deleted file mode 100644 index d9bb22fb4d..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer-cli/1834-clear-packets-cmd.md +++ /dev/null @@ -1,3 +0,0 @@ -- Added `clear packets` command, combining the effects of - `tx raw packet-recv` and `tx raw packet-ack`. - ([#1834](https://github.com/informalsystems/ibc-rs/pull/1834)) diff --git a/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md b/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md deleted file mode 100644 index acd8d55307..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer/1388-more-health-checks.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add two more health checks: tx indexing enabled and historical entries > 0 - ([#1388](https://github.com/informalsystems/ibc-rs/issues/1388)) diff --git a/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md b/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md deleted file mode 100644 index ecba32327b..0000000000 --- a/.changelog/v0.12.0/improvements/ibc-relayer/1880-nonallocating-verions-method.md +++ /dev/null @@ -1 +0,0 @@ -- Changed `ConnectionEnd::versions` method to be non-allocating by having it return a `&[Version]` instead of `Vec`. 
([#1880](https://github.com/informalsystems/ibc-rs/pull/1880)) diff --git a/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md b/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md deleted file mode 100644 index 4fb330d8ab..0000000000 --- a/.changelog/v0.12.0/improvements/ibc/1706-add-client-state-tests.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added more unit tests to verify Tendermint ClientState - ([#1706](https://github.com/informalsystems/ibc-rs/issues/1706)) \ No newline at end of file diff --git a/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md b/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md deleted file mode 100644 index 6e8f46352f..0000000000 --- a/.changelog/v0.12.0/improvements/ibc/1769-cap-reader-keeper.md +++ /dev/null @@ -1,2 +0,0 @@ -- Define CapabilityReader and CapabilityKeeper traits - ([#1769](https://github.com/informalsystems/ibc-rs/issues/1769)) \ No newline at end of file diff --git a/.changelog/v0.12.0/summary.md b/.changelog/v0.12.0/summary.md deleted file mode 100644 index 2801cc0b94..0000000000 --- a/.changelog/v0.12.0/summary.md +++ /dev/null @@ -1,13 +0,0 @@ -This release notably brings compatibility with Cosmos SDK 0.45 and IBC v3.0.0, -as well as support for non-standard ports in the channel handshake. -It also contains a fix for a bug where `SendPacket` events were duplicated when emitted at EndBlock, -and fixes another bug where Hermes would clear packet at startup even when `clear_on_start = false`. -Additionally, a new CLI command `clear packets` has been added for clearing packets in both direction on a given channel. -The relayer will now also honor the `tracing` filter specified in the `RUST_LOG` environment variable, if any. - -### Note for operators - -As of this release, the relayer will not respond to the `SIGHUP` signal and will therefore -not reload the configuration anymore. 
This feature has been deemed unnecessary given the -recent performance improvements, and it is now recommended to just restart the relayer -when the configuration is updated. diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md deleted file mode 100644 index a326c7f02e..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1835-ordered-channels.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed relayer behavior on ordered channels - ([#1835](https://github.com/informalsystems/ibc-rs/issues/1835)) \ No newline at end of file diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md deleted file mode 100644 index 6f8c9a4418..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/1991-packet-worker-chan-open.md +++ /dev/null @@ -1,2 +0,0 @@ -- Do not spawn packet worker on chan open ack/confirm events - ([#1991](https://github.com/informalsystems/ibc-rs/issues/1991)) \ No newline at end of file diff --git a/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md b/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md deleted file mode 100644 index b0ca5dfd76..0000000000 --- a/.changelog/v0.13.0/bug-fixes/ibc-relayer/2008-slow-relayer.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix a bug which would cause the relayer to slow down exponentially when either - the average block time was low or when it was relaying on too many chains at - once ([#2008](https://github.com/informalsystems/ibc-rs/issues/2008)) diff --git a/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md b/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md deleted file mode 100644 index f5c247276b..0000000000 --- a/.changelog/v0.13.0/features/ibc-proto/1913-cosmwasm-support.md +++ /dev/null @@ -1,5 +0,0 @@ -- Add CosmWasm support to the generated Protobuf code 
([#1913](https://github.com/informalsystems/ibc-rs/issues/1913)) - * Add a new `client` feature to gate the tonic client code, implies the `std` feature. - * Add a new `json-schema` feature to derive `schemars::JsonSchema` on some proto types, implies the `std` feature. - * Add `#[serde(default)]` to fields that might be omitted by Golang `omitempty` directive. - * Change serialization of byte arrays to Base64 for compatibility with Go. diff --git a/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md b/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md deleted file mode 100644 index ed2b6ad8a3..0000000000 --- a/.changelog/v0.13.0/features/ibc-proto/1988-serde-serialize-deserialize.md +++ /dev/null @@ -1,3 +0,0 @@ -- Derive `Serialize` and `Deserialize` for `ibc-proto::ibc::core` and `ibc_proto::ibc::applications` structs, - and switch to Google's Protobuf standard types instead of Prost's types. - ([#1988](https://github.com/informalsystems/ibc-rs/issues/1988)) diff --git a/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md b/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md deleted file mode 100644 index a27e275b40..0000000000 --- a/.changelog/v0.13.0/features/ibc-relayer/1908-caching-layer.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added caching layer for hermes start command - ([#1908](https://github.com/informalsystems/ibc-rs/issues/1908)) \ No newline at end of file diff --git a/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md b/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md deleted file mode 100644 index 0b4f8be40f..0000000000 --- a/.changelog/v0.13.0/features/ibc-relayer/1927-packet-filtering-wildcards.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add support for wildcards in port and channel identifiers in the packet filter configuration, - which enable operators to filter ICA channels based on the port prefix - 
([#1927](https://github.com/informalsystems/ibc-rs/issues/1927)) diff --git a/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md b/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md deleted file mode 100644 index 8a998cdd9e..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-integration-test/1961-test-framework.md +++ /dev/null @@ -1 +0,0 @@ -- Split out test framework as new crate `ibc-test-framework` from `ibc-integration-test`. ([#1961](https://github.com/informalsystems/ibc-rs/pull/1961)) diff --git a/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md b/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md deleted file mode 100644 index b436c92549..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-relayer-cli/1559-cli-output.md +++ /dev/null @@ -1 +0,0 @@ -- Print packet data on one line ([#1559](https://github.com/informalsystems/ibc-rs/issues/1559)) diff --git a/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md b/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md deleted file mode 100644 index c215b025ce..0000000000 --- a/.changelog/v0.13.0/improvements/ibc-relayer/1908-caching-layer-documentation.md +++ /dev/null @@ -1 +0,0 @@ -- Add documentation for the caching layer implemented in ([#1908](https://github.com/informalsystems/ibc-rs/issues/1908)) diff --git a/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md b/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md deleted file mode 100644 index f3272acf64..0000000000 --- a/.changelog/v0.13.0/improvements/ibc/718-rework-ics04_channel-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Refactored ics04_channel events - ([#718](https://github.com/informalsystems/ibc-rs/issues/718)) \ No newline at end of file diff --git a/.changelog/v0.13.0/summary.md b/.changelog/v0.13.0/summary.md deleted file mode 100644 index 041f9fce19..0000000000 --- 
a/.changelog/v0.13.0/summary.md +++ /dev/null @@ -1,59 +0,0 @@ -*March 28th, 2022* - -Hermes v0.13.0 improves performance by lowering the pressure -on the full nodes by adding a caching layer for some queries. -It also fixes a bug which could cause an exponential slowdown -when relaying between many chains with a low average block time. - -This release also add support for wildcards in port and channel identifiers -in the packet filter configuration, which enable operators to filter -ICA channels based on the port prefix. - -Additionally, the IBC Protocol Buffers definitions can now be used from CosmWasm. - -## Note for operators - -As of version 0.13.0, Hermes supports relaying on [Interchain Accounts][ica] channels. - -If the `packet_filter` option in the chain configuration is disabled, then -Hermes will relay on all existing and future channels, including ICA channels. - -There are two kinds of ICA channels: - -1. The host channels, whose port is `icahost` -2. The controller channels, whose port starts with `icacontroller-` followed - by the owner account address. [See the spec for more details][ica]. - -If you wish to only relay on a few specific standard channels (here `channel-0` and `channel-1`), -but also relay on all ICA channels, you can specify the following packet filter: - -> Note the use of wildcards in the port and channel identifiers (`['ica*', '*']`) -> to match over all the possible ICA ports. 
- -```toml -[chains.packet_filter] -policy = 'allow' -list = [ - ['ica*', '*'], # allow relaying on all channels whose port starts with `ica` - ['transfer', 'channel-0'], - ['transfer', 'channel-1'], - # Add any other port/channel pairs you wish to relay on -] -``` - -If you wish to relay on all channels but not on ICA channels, you can use -the following packet filter configuration: - -```toml -[chains.packet_filter] -policy = 'deny' -list = [ - ['ica*', '*'], # deny relaying on all channels whose port starts with `ica` -] -``` - -This information can also be found in the [Hermes guide][guide-ica]. - -[ica]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md -[guide-ica]: https://hermes.informal.systems/config.html#support-for-interchain-accounts - diff --git a/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md b/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md deleted file mode 100644 index 5b1b8897d6..0000000000 --- a/.changelog/v0.14.0/breaking-changes/2081-msrv-1.60.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.60 - ([#2081](https://github.com/informalsystems/ibc-rs/issues/2081)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md deleted file mode 100644 index 656ed75bde..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1288-upgrade-chain-confirmation.md +++ /dev/null @@ -1,2 +0,0 @@ -- Skip waiting for confirmation events on tx raw upgrade-chain - ([#1288](https://github.com/informalsystems/ibc-rs/issues/1288)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md deleted file mode 100644 index 2c2135d998..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer-cli/1921-create-client-options.md +++ /dev/null @@ -1,2 +0,0 @@ -- 
Apply client options specified with the `create client` command. - ([#1921](https://github.com/informalsystems/ibc-rs/issues/1921)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md deleted file mode 100644 index 3bbf8ce62f..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1772-fix-conn-delay-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix the connection delay logic to use the timestamp of the host block when the client update header was installed. - ([#1772](https://github.com/informalsystems/ibc-rs/issues/1772)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md deleted file mode 100644 index 7e01a0fb21..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1792-fix-hermes-retrying-not-regenerating-msgs.md +++ /dev/null @@ -1 +0,0 @@ -- Fixed Hermes retrying mechanism not regenerating operational data for messages ([#1792](https://github.com/informalsystems/ibc-rs/pull/1951)) diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md deleted file mode 100644 index c6d688f593..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/1998-default-max-block-time.md +++ /dev/null @@ -1,2 +0,0 @@ -- Adjusted max_block_time default value to 30s - ([#1998](https://github.com/informalsystems/ibc-rs/issues/1998)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md deleted file mode 100644 index f8e3a59605..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2075-wildcard-filter-middle.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix a bug in the wildcard 
filter where pattern would match in the middle of a - string ([#2075](https://github.com/informalsystems/ibc-rs/issues/2075)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md b/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md deleted file mode 100644 index ef2ed9d7ea..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc-relayer/2097-misbehavior-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed target height used in misbehavior detection. - ([#2097](https://github.com/informalsystems/ibc-rs/issues/2097)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md b/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md deleted file mode 100644 index 97c3a54a48..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc/2035-handler-event-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Make all handlers emit an IbcEvent with current host chain height as height parameter value. - ([#2035](https://github.com/informalsystems/ibc-rs/issues/2035)) \ No newline at end of file diff --git a/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md b/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md deleted file mode 100644 index 0d3b661368..0000000000 --- a/.changelog/v0.14.0/bug-fixes/ibc/2062-conn-open-init-version.md +++ /dev/null @@ -1,2 +0,0 @@ -- Use the version in the message when handling a MsgConnOpenInit - ([#2062](https://github.com/informalsystems/ibc-rs/issues/2062)) \ No newline at end of file diff --git a/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md b/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md deleted file mode 100644 index 4ba6cb3841..0000000000 --- a/.changelog/v0.14.0/features/ibc-relayer/2036-caching-metrics.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add a metric for query cache hits - ([#2036](https://github.com/informalsystems/ibc-rs/issues/2036)) \ No newline at end of file diff --git 
a/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md b/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md deleted file mode 100644 index eb510e28df..0000000000 --- a/.changelog/v0.14.0/improvements/1936-missing-chain-warn.md +++ /dev/null @@ -1,2 +0,0 @@ -- Log `missing chain in configuration` errors emitted during event processing at - debug level ([#1936](https://github.com/informalsystems/ibc-rs/issues/1936)) diff --git a/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md b/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md deleted file mode 100644 index 23784ad668..0000000000 --- a/.changelog/v0.14.0/improvements/2045-tendermint-0.23.6.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update tendermint-rs dependencies to v0.23.6 - ([#2045](https://github.com/informalsystems/ibc-rs/issues/2045)) \ No newline at end of file diff --git a/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md b/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md deleted file mode 100644 index a64398d712..0000000000 --- a/.changelog/v0.14.0/improvements/ibc-relayer-cli/1421-create-channel-cli.md +++ /dev/null @@ -1,2 +0,0 @@ -- Change `create channel` CLI command such that it is more difficult to create - clients / connections using it ([#1421](https://github.com/informalsystems/ibc-rs/issues/1421)) diff --git a/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md b/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md deleted file mode 100644 index 0ec1253df3..0000000000 --- a/.changelog/v0.14.0/improvements/ibc-relayer-cli/2096-query-packet-pending.md +++ /dev/null @@ -1,4 +0,0 @@ -- Added `query packet pending` command to list outstanding packet - commitments that are either unreceived or pending acknowledgement - at both ends of a channel. 
- ([#1862](https://github.com/informalsystems/ibc-rs/issues/1862)) diff --git a/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md b/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md deleted file mode 100644 index ebad8cf544..0000000000 --- a/.changelog/v0.14.0/improvements/ibc/1758-complete-ics26.md +++ /dev/null @@ -1 +0,0 @@ -- Complete ICS26 implementation ([#1758](https://github.com/informalsystems/ibc-rs/issues/1758)) \ No newline at end of file diff --git a/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md b/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md deleted file mode 100644 index f9ffd9d8cb..0000000000 --- a/.changelog/v0.14.0/improvements/ibc/2068-chan-id-u64.md +++ /dev/null @@ -1 +0,0 @@ -- Improve `ChannelId` validation. ([#2068](https://github.com/informalsystems/ibc-rs/issues/2068)) diff --git a/.changelog/v0.14.0/summary.md b/.changelog/v0.14.0/summary.md deleted file mode 100644 index 0f5e1996c2..0000000000 --- a/.changelog/v0.14.0/summary.md +++ /dev/null @@ -1,17 +0,0 @@ -This release notably features a new [`query packet pending`][pending] command to -list outstanding packet commitments that are either unreceived or pending -acknowledgement at both ends of a channel. - -The `ibc` crate now also come with a complete [ICS 026][ics-26] implementation. - -### Note for operators - -There is a new `query packet pending` command, see above for more information. - -The `create channel` command now requires an existing client and connection, -unless the `--new-client-connection` flag is provided. -Please [refer to the guide][create-channel] for more information. 
- -[ics-26]: https://github.com/cosmos/ibc/blob/master/spec/core/ics-026-routing-module/README.md -[pending]: https://hermes.informal.systems/commands/queries/packet.html#pending-packets -[create-channel]: http://hermes.informal.systems/commands/path-setup/channels.html#establish-channel diff --git a/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md b/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md deleted file mode 100644 index 167173977e..0000000000 --- a/.changelog/v0.14.1/bug-fixes/ibc-relayer/1970-app-latest-height.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed query for application status when application lags blockchain state. - ([#1970](https://github.com/informalsystems/ibc-rs/issues/1970)) \ No newline at end of file diff --git a/.changelog/v0.14.1/summary.md b/.changelog/v0.14.1/summary.md deleted file mode 100644 index 49c6ac749c..0000000000 --- a/.changelog/v0.14.1/summary.md +++ /dev/null @@ -1,2 +0,0 @@ -This release improves the reliability of the relayer by fixing an edge case where -some queries would fail if they reach a full node after a new block is committed but before the application state updates to reflect the changes in that block. diff --git a/.changelog/v0.15.0/2181-update-codeowners.md b/.changelog/v0.15.0/2181-update-codeowners.md deleted file mode 100644 index 5452abf60e..0000000000 --- a/.changelog/v0.15.0/2181-update-codeowners.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update `.github/CODEOWNERS` file so that each root-level directory is assigned - a default reviewer. 
([#2181](https://github.com/informalsystems/ibc-rs/pull/2181)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md deleted file mode 100644 index acf9f7a16f..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc-relayer/1971-non-batch-fix.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix a bug where connection and channel handshakes would fail with non-batching transactions - ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md b/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md deleted file mode 100644 index 5bbb0a47eb..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc-relayer/2180-client-expiry-time.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed client expiry computation to avoid using local time. - ([#2180](https://github.com/informalsystems/ibc-rs/issues/2180)) \ No newline at end of file diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md b/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md deleted file mode 100644 index 570c1b3338..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2104-fix-commitment-computation.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix packet commitment calculation to match IBC-Go - ([#2104](https://github.com/informalsystems/ibc-rs/issues/2104)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md b/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md deleted file mode 100644 index 0987d40b6f..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2114-fix-ack-verification.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix incorrect acknowledgement verification - ([#2114](https://github.com/informalsystems/ibc-rs/issues/2114)) diff --git a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md b/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md deleted file mode 100644 index 
af72298e4b..0000000000 --- a/.changelog/v0.15.0/bug-fixes/ibc/2178-conn-ack-bug-fix.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix connection identifier mix-up in connection acknowledgement processing - ([#2178](https://github.com/informalsystems/ibc-rs/issues/2178)) diff --git a/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md b/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md deleted file mode 100644 index 4b3fef7acf..0000000000 --- a/.changelog/v0.15.0/features/1986-gaia-e2e-tests.md +++ /dev/null @@ -1,2 +0,0 @@ -- Replaced gaia v5 with v7 in E2E tests. - ([#1986](https://github.com/informalsystems/ibc-rs/issues/1986)) \ No newline at end of file diff --git a/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md b/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md deleted file mode 100644 index 42e041ed48..0000000000 --- a/.changelog/v0.15.0/features/ibc-relayer/2112-new-metrics.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add six new metrics: `wallet_balance`, `ws_events`, `ws_reconnect`, - `tx_latency_submitted`, `tx_latency_confirmed`, `msg_num` - ([#2112](https://github.com/informalsystems/ibc-rs/issues/2112)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md b/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md deleted file mode 100644 index b416ad6d92..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/1971-max-msg-num-min-bound.md +++ /dev/null @@ -1,2 +0,0 @@ -- Ensure `max_msg_num` is between 1 and 100 with a default of 30 - ([#1971](https://github.com/informalsystems/ibc-rs/issues/1971)) diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md b/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md deleted file mode 100644 index 66941ed5c6..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2031-misleading-misbehavior-error.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fixed misleading 
error message leaking from the misbehavior detection task. - ([#2031](https://github.com/informalsystems/ibc-rs/issues/2031)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md b/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md deleted file mode 100644 index 024797fe2e..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2087-incremental-packet-clearing.md +++ /dev/null @@ -1,2 +0,0 @@ -- Added support for incremental processing of packet clearing commands. - ([#2087](https://github.com/informalsystems/ibc-rs/issues/2087)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md b/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md deleted file mode 100644 index ab1172cbcf..0000000000 --- a/.changelog/v0.15.0/improvements/ibc-relayer/2192-adr009-impl.md +++ /dev/null @@ -1,2 +0,0 @@ -- Implement ADR 9: add domain type for request messages that are passed to query - functions ([#2192](https://github.com/informalsystems/ibc-rs/issues/2192)) \ No newline at end of file diff --git a/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md b/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md deleted file mode 100644 index 54c3531742..0000000000 --- a/.changelog/v0.15.0/improvements/ibc/2159-remove-ocaps.md +++ /dev/null @@ -1,2 +0,0 @@ -- Remove object capabilities from the modules - ([#2159](https://github.com/informalsystems/ibc-rs/issues/2159)) \ No newline at end of file diff --git a/.changelog/v0.15.0/summary.md b/.changelog/v0.15.0/summary.md deleted file mode 100644 index 0252036cb8..0000000000 --- a/.changelog/v0.15.0/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This release brings a number of bug fixes, some performance improvements, -notably when [clearing packets](https://github.com/informalsystems/ibc-rs/issues/2087), -as well as [new 
metrics](https://github.com/informalsystems/ibc-rs/issues/2112) -for better observability of the relayer's operations. diff --git a/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md b/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md deleted file mode 100644 index b4722bd865..0000000000 --- a/.changelog/v0.6.2/bug-fixes/1247-add-missing-protobuf-impl.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add missing `Protobuf` impl for `ics03_connection::connection::Counterparty` ([#1247]) - -[#1247]: https://github.com/informalsystems/ibc-rs/issues/1247 diff --git a/.changelog/v0.6.2/features/1020-augment-error-type.md b/.changelog/v0.6.2/features/1020-augment-error-type.md deleted file mode 100644 index 4fbba672cc..0000000000 --- a/.changelog/v0.6.2/features/1020-augment-error-type.md +++ /dev/null @@ -1,3 +0,0 @@ -- Augment ClientCreationFailed error with chain id and WS address ([#1020]) - -[#1020]: https://github.com/informalsystems/ibc-rs/issues/1020 diff --git a/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md b/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md deleted file mode 100644 index ff5de3ccb2..0000000000 --- a/.changelog/v0.6.2/features/1021-cli-indicate-config-file-error.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve the error message for config file parse errors ([#1021]) - -[#1021]: https://github.com/informalsystems/ibc-rs/issues/1021 \ No newline at end of file diff --git a/.changelog/v0.6.2/features/1229-upgrade-clis.md b/.changelog/v0.6.2/features/1229-upgrade-clis.md deleted file mode 100644 index 980128a25f..0000000000 --- a/.changelog/v0.6.2/features/1229-upgrade-clis.md +++ /dev/null @@ -1,3 +0,0 @@ -- Fix for upgrade CLI regression using new type ics02::TrustThreshold ([#1229]) - -[#1229]: https://github.com/informalsystems/ibc-rs/issues/1229 diff --git a/.changelog/v0.6.2/features/988-flex-error.md b/.changelog/v0.6.2/features/988-flex-error.md deleted file mode 100644 index d89a1b6525..0000000000 --- 
a/.changelog/v0.6.2/features/988-flex-error.md +++ /dev/null @@ -1,4 +0,0 @@ -- Use the [`flex-error`](https://docs.rs/flex-error/) crate to define and -handle errors ([#1158]) - -[#1158]: https://github.com/informalsystems/ibc-rs/issues/1158 diff --git a/.changelog/v0.6.2/improvements/1245-max-params-validation.md b/.changelog/v0.6.2/improvements/1245-max-params-validation.md deleted file mode 100644 index f0205f6b39..0000000000 --- a/.changelog/v0.6.2/improvements/1245-max-params-validation.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add semantic validation of of `max_tx_size` and `max_num_msg` config options ([#1245]) - -[#1245]: https://github.com/informalsystems/ibc-rs/issues/1245 diff --git a/.changelog/v0.6.2/summary.md b/.changelog/v0.6.2/summary.md deleted file mode 100644 index dd59d511d1..0000000000 --- a/.changelog/v0.6.2/summary.md +++ /dev/null @@ -1,5 +0,0 @@ -This minor release of Hermes re-enables the `upgrade client`, `upgrade clients`, -`tx raw upgrade-clients`, and `tx raw upgrade-chain`, and otherwise -contains a few bug fixes and internal improvements. - -Upgrading from version `0.6.1` to `0.6.2` requires no explicit steps. 
diff --git a/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md b/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md deleted file mode 100644 index 2361683873..0000000000 --- a/.changelog/v0.7.0/bug-fixes/1261-gm-req-detect.md +++ /dev/null @@ -1,5 +0,0 @@ -- [gm] Fix silent exit when requirements are missing - -[#1261]: https://github.com/informalsystems/ibc-rs/issues/1261 - - diff --git a/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md b/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md deleted file mode 100644 index 44dee3d0de..0000000000 --- a/.changelog/v0.7.0/bug-fixes/1285-fix-typeok-bug.md +++ /dev/null @@ -1,4 +0,0 @@ -- Fix small typo in IBC.tla specification - -[#1285]: https://github.com/informalsystems/ibc-rs/pull/1285 - diff --git a/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md b/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md deleted file mode 100644 index 2c78951c52..0000000000 --- a/.changelog/v0.7.0/bug-fixes/ibc/1257-set-capability-index.md +++ /dev/null @@ -1,3 +0,0 @@ -- Set the index of `ibc::ics05_port::capabilities::Capability` ([#1257]) - -[#1257]: https://github.com/informalsystems/ibc-rs/issues/1257 diff --git a/.changelog/v0.7.0/features/1065-keys-delete.md b/.changelog/v0.7.0/features/1065-keys-delete.md deleted file mode 100644 index 52a132c11d..0000000000 --- a/.changelog/v0.7.0/features/1065-keys-delete.md +++ /dev/null @@ -1,3 +0,0 @@ -- Added `keys delete` CLI command ([#1065]) - -[#1065]: https://github.com/informalsystems/ibc-rs/issues/1065 diff --git a/.changelog/v0.7.0/features/1175-update-ci.md b/.changelog/v0.7.0/features/1175-update-ci.md deleted file mode 100644 index 52b9f44b74..0000000000 --- a/.changelog/v0.7.0/features/1175-update-ci.md +++ /dev/null @@ -1,4 +0,0 @@ -- Update CI to test with gaiad v5.0.5 ([#1175]) - - -[#1175]: https://github.com/informalsystems/ibc-rs/issues/1175 diff --git a/.changelog/v0.7.0/features/1287-upgrade-legacy.md 
b/.changelog/v0.7.0/features/1287-upgrade-legacy.md deleted file mode 100644 index 2ff89373cf..0000000000 --- a/.changelog/v0.7.0/features/1287-upgrade-legacy.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add `--legacy | -l` flag to support upgrades for chains built with Cosmos SDK < v0.43.0 ([#1287]) - -[#1287]: https://github.com/informalsystems/ibc-rs/issues/1287 diff --git a/.changelog/v0.7.0/features/843-rest-api.md b/.changelog/v0.7.0/features/843-rest-api.md deleted file mode 100644 index 6a3866ece1..0000000000 --- a/.changelog/v0.7.0/features/843-rest-api.md +++ /dev/null @@ -1,3 +0,0 @@ -- Expose the Hermes config and internal state over a REST API ([#843]) - -[#843]: https://github.com/informalsystems/ibc-rs/issues/843 diff --git a/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md b/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md deleted file mode 100644 index f953aeb8c1..0000000000 --- a/.changelog/v0.7.0/features/901-conditionally-spawn-worker.md +++ /dev/null @@ -1,3 +0,0 @@ -- Spawn packet workers only when there are outstanding packets or acknowledgements to relay ([#901]) - -[#901]: https://github.com/informalsystems/ibc-rs/issues/901 diff --git a/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md b/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md deleted file mode 100644 index 8b558e883e..0000000000 --- a/.changelog/v0.7.0/features/948-upgrade-to-cosmos-sdk-v0.43.md +++ /dev/null @@ -1,3 +0,0 @@ -- Upgrade to Cosmos SDK proto (v0.43.0) & ibc-go proto (v1.0.0) ([#948]) - -- [#948]: https://github.com/informalsystems/ibc-rs/pull/948 diff --git a/.changelog/v0.7.0/improvements/1132-query-channels-filter.md b/.changelog/v0.7.0/improvements/1132-query-channels-filter.md deleted file mode 100644 index fa73b5d663..0000000000 --- a/.changelog/v0.7.0/improvements/1132-query-channels-filter.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add optional destination chain and `--verbose` options for `query channels` CLI ([#1132]) - 
-[#1132]: https://github.com/informalsystems/ibc-rs/issues/1132 - diff --git a/.changelog/v0.7.0/improvements/1191-ica-compat.md b/.changelog/v0.7.0/improvements/1191-ica-compat.md deleted file mode 100644 index b189173405..0000000000 --- a/.changelog/v0.7.0/improvements/1191-ica-compat.md +++ /dev/null @@ -1,4 +0,0 @@ -- Improve support for Interchain Accounts (ICS 027) ([#1191]) - -[#1191]: https://github.com/informalsystems/ibc-rs/issues/1191 - diff --git a/.changelog/v0.7.0/improvements/1249-update-modelator.md b/.changelog/v0.7.0/improvements/1249-update-modelator.md deleted file mode 100644 index 7912ece5fd..0000000000 --- a/.changelog/v0.7.0/improvements/1249-update-modelator.md +++ /dev/null @@ -1,3 +0,0 @@ -- Update Modelator to 0.2.0 ([#1249]) - -[#1249]: https://github.com/informalsystems/ibc-rs/pull/1249 diff --git a/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md b/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md deleted file mode 100644 index 59cacc2c1b..0000000000 --- a/.changelog/v0.7.0/improvements/1265-async-tx-confirmation.md +++ /dev/null @@ -1,5 +0,0 @@ -- Improve performance and reliability of the relayer by asynchronously waiting for tx confirmations ([#1124], [#1265]) - -[#1124]: https://github.com/informalsystems/ibc-rs/issues/1124 -[#1265]: https://github.com/informalsystems/ibc-rs/issues/1265 - diff --git a/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md b/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md deleted file mode 100644 index 4719175311..0000000000 --- a/.changelog/v0.7.0/improvements/ibc/1297-impl-consensus-state.md +++ /dev/null @@ -1,3 +0,0 @@ -- Implement `ics02_client::client_consensus::ConsensusState` for `AnyConsensusState` ([#1297]) - -[#1297]: https://github.com/informalsystems/ibc-rs/issues/1297 diff --git a/.changelog/v0.7.0/summary.md b/.changelog/v0.7.0/summary.md deleted file mode 100644 index e098cdbc39..0000000000 --- a/.changelog/v0.7.0/summary.md +++ 
/dev/null @@ -1,3 +0,0 @@ -This release of Hermes is the first to be compatible with the development version of Cosmos SDK 0.43. -Hermes 0.7.0 also improves the performance and reliability of the relayer, notably by waiting asynchronously for transactions to be confirmed. -Additionnally, Hermes now includes a REST server which exposes the relayer's internal state over HTTP. diff --git a/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md b/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md deleted file mode 100644 index 34ab0696fa..0000000000 --- a/.changelog/v0.7.1/bug-fixes/1312-fix-gm-stderr.md +++ /dev/null @@ -1,7 +0,0 @@ - -- [gm](scripts/gm) - - Fix gaiad keys add prints to stderr instead of stdout in SDK 0.43 ([#1312]) - - Bumped default rpc_timeout in Hermes config to 5s ([#1312]) - -[#1312]: https://github.com/informalsystems/ibc-rs/issues/1312 - diff --git a/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md b/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md deleted file mode 100644 index 30aaea171a..0000000000 --- a/.changelog/v0.7.1/bug-fixes/1343-fix-header-decoding-error.md +++ /dev/null @@ -1 +0,0 @@ -- Fix header decoding error which resulted in killing the chain runtime ([#1342](https://github.com/informalsystems/ibc-rs/issues/1342)) diff --git a/.changelog/v0.7.1/features/1267-ethermint-support.md b/.changelog/v0.7.1/features/1267-ethermint-support.md deleted file mode 100644 index 602c516bee..0000000000 --- a/.changelog/v0.7.1/features/1267-ethermint-support.md +++ /dev/null @@ -1,4 +0,0 @@ -- Added post-Stargate (v0.5+) Ethermint support ([#1267] [#1071]) - -[#1267]: https://github.com/informalsystems/ibc-rs/issues/1267 -[#1071]: https://github.com/informalsystems/ibc-rs/issues/1071 diff --git a/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md b/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md deleted file mode 100644 index 5349faf775..0000000000 --- 
a/.changelog/v0.7.1/improvements/1281-derive-traits-module-errors.md +++ /dev/null @@ -1,4 +0,0 @@ -- Derive `Debug`, `PartialEq` and `Eq` traits for module errors ([#1281]) - -[#1281]: https://github.com/informalsystems/ibc-rs/issues/1281 - diff --git a/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md b/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md deleted file mode 100644 index d93be65047..0000000000 --- a/.changelog/v0.7.1/improvements/1311-mbt-test-client-upgrade.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add MBT tests for ICS 07 Client Upgrade ([#1311]) - -[#1311]: https://github.com/informalsystems/ibc-rs/issues/1311 - diff --git a/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md b/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md deleted file mode 100644 index 85f8bdfd08..0000000000 --- a/.changelog/v0.7.1/improvements/1319-u256-amount-transfer.md +++ /dev/null @@ -1,4 +0,0 @@ -- Add support for uint256 transfer amounts ([#1319]) - -[#1319]: https://github.com/informalsystems/ibc-rs/issues/1319 - diff --git a/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md b/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md deleted file mode 100644 index c477be4df4..0000000000 --- a/.changelog/v0.7.1/improvements/ibc/1268-reader-result.md +++ /dev/null @@ -1,3 +0,0 @@ -- Change all `*Reader` traits to return `Result` instead of `Option` ([#1268]) - -[#1268]: https://github.com/informalsystems/ibc-rs/issues/1268 diff --git a/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md b/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md deleted file mode 100644 index 103394ce6f..0000000000 --- a/.changelog/v0.7.1/improvements/ibc/1333-modules-error.md +++ /dev/null @@ -1,9 +0,0 @@ - -- Clean up modules' errors ([#1333]) - -[#1333]: https://github.com/informalsystems/ibc-rs/issues/1333 diff --git a/.changelog/v0.7.1/summary.md b/.changelog/v0.7.1/summary.md deleted file mode 100644 index d5a6a6d523..0000000000 --- 
a/.changelog/v0.7.1/summary.md +++ /dev/null @@ -1,2 +0,0 @@ -This minor release of Hermes notably features support for Ethermint chains and transfer amounts expressed as a 256-bit unsigned integer. -This release also fixes a bug where the chain runtime within the relayer would crash when failing to decode a invalid header included in a `ClientUpdate` IBC event. diff --git a/.changelog/v0.7.2/features/1155-secp256k1-signatures.md b/.changelog/v0.7.2/features/1155-secp256k1-signatures.md deleted file mode 100644 index ba8f34df47..0000000000 --- a/.changelog/v0.7.2/features/1155-secp256k1-signatures.md +++ /dev/null @@ -1 +0,0 @@ -- Support for chains which use Secp256k1 signatures in consensus votes ([#1155](https://github.com/informalsystems/ibc-rs/issues/1155)) diff --git a/.changelog/v0.7.2/features/1290-stubborn-workers.md b/.changelog/v0.7.2/features/1290-stubborn-workers.md deleted file mode 100644 index d89e80f338..0000000000 --- a/.changelog/v0.7.2/features/1290-stubborn-workers.md +++ /dev/null @@ -1,2 +0,0 @@ -- Modified packet worker to use stubborn strategy ([#1290](https://github.com/informalsystems/ibc-rs/issues/1290)) - diff --git a/.changelog/v0.7.2/features/1362-skip-consensus-states.md b/.changelog/v0.7.2/features/1362-skip-consensus-states.md deleted file mode 100644 index 44b0e49cfa..0000000000 --- a/.changelog/v0.7.2/features/1362-skip-consensus-states.md +++ /dev/null @@ -1 +0,0 @@ -- Skip `consensus_heights` query in `update_client` when possible ([#1362](https://github.com/informalsystems/ibc-rs/issues/1362)) diff --git a/.changelog/v0.7.2/features/1371-gm-features.md b/.changelog/v0.7.2/features/1371-gm-features.md deleted file mode 100644 index d82b27464f..0000000000 --- a/.changelog/v0.7.2/features/1371-gm-features.md +++ /dev/null @@ -1,4 +0,0 @@ - -- [gm](scripts/gm) - - Binaries in the config can be defined as URLs now. 
- - Add the option to set gm-lib path via the $GM_LIB environment variable ([#1365](https://github.com/informalsystems/ibc-rs/issues/1365)) diff --git a/.changelog/v0.7.2/features/1380-toggle-confirmations.md b/.changelog/v0.7.2/features/1380-toggle-confirmations.md deleted file mode 100644 index 195ef85191..0000000000 --- a/.changelog/v0.7.2/features/1380-toggle-confirmations.md +++ /dev/null @@ -1 +0,0 @@ -- Support for disabling tx confirmation mechanism ([#1380](https://github.com/informalsystems/ibc-rs/issues/1380)) diff --git a/.changelog/v0.7.2/improvements/1156-use-core-alloc.md b/.changelog/v0.7.2/improvements/1156-use-core-alloc.md deleted file mode 100644 index 672c0731dc..0000000000 --- a/.changelog/v0.7.2/improvements/1156-use-core-alloc.md +++ /dev/null @@ -1 +0,0 @@ -- Use `core` and `alloc` crates for `no_std` compatibility ([#1156](https://github.com/informalsystems/ibc-rs/issues/1156)) diff --git a/.changelog/v0.7.2/improvements/1336-better-health-check.md b/.changelog/v0.7.2/improvements/1336-better-health-check.md deleted file mode 100644 index 4d27362957..0000000000 --- a/.changelog/v0.7.2/improvements/1336-better-health-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of health check, and only perform it on `hermes start`. - Add a `hermes health-check` command. 
([#1336](https://github.com/informalsystems/ibc-rs/issues/1336)) diff --git a/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md b/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md deleted file mode 100644 index 8ecc3d68cf..0000000000 --- a/.changelog/v0.7.2/improvements/1337-semver-pre-compat.md +++ /dev/null @@ -1 +0,0 @@ -- Treat pre-releases of the Cosmos SDK as their standard version in compatibility check ([#1337](https://github.com/informalsystems/ibc-rs/issues/1337)) diff --git a/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md b/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md deleted file mode 100644 index 2448d2ad84..0000000000 --- a/.changelog/v0.7.2/improvements/1344-bump-compat-0.44.md +++ /dev/null @@ -1 +0,0 @@ -- Bump Cosmos SDK compatibility to v0.44.0 ([#1344](https://github.com/informalsystems/ibc-rs/issues/1344)) diff --git a/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md b/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md deleted file mode 100644 index dce8e9f170..0000000000 --- a/.changelog/v0.7.2/improvements/1376-consensus-params-explicit-height.md +++ /dev/null @@ -1 +0,0 @@ -- Improve reliability of health check ([#1382](https://github.com/informalsystems/ibc-rs/issues/1376)) diff --git a/.changelog/v0.7.2/summary.md b/.changelog/v0.7.2/summary.md deleted file mode 100644 index 944ef18229..0000000000 --- a/.changelog/v0.7.2/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This minor release brings substantial performance improvements as well as -support for chains using Secp256k1 signatures in consensus votes. - -It also bumps the compatibility to Cosmos SDK 0.44. 
diff --git a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md b/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md deleted file mode 100644 index 430dc7a65b..0000000000 --- a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1345-fix-tx-simulation-0.42.md +++ /dev/null @@ -1,4 +0,0 @@ -- Fix a bug introduced in Hermes v0.7.0 where tx simulations would fail on - chains based on Cosmos SDK 0.42. This would cause Hermes to use the max - gas specified in the config when submitted the tx, leading to high fees. - ([#1345](https://github.com/informalsystems/ibc-rs/issues/1345)) \ No newline at end of file diff --git a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md b/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md deleted file mode 100644 index 509f3d3067..0000000000 --- a/.changelog/v0.7.3/bug-fixes/ibc-relayer/1402-fix-account-seq-error-case.md +++ /dev/null @@ -1,3 +0,0 @@ -- Only increase cached account sequence number when `broadcast_tx_sync` fails, - therefore ensuring that the cached sequence number stays in sync with the - node. 
([#1402](https://github.com/informalsystems/ibc-rs/issues/1402)) \ No newline at end of file diff --git a/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md b/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md deleted file mode 100644 index 42d997a229..0000000000 --- a/.changelog/v0.7.3/improvements/ibc-relayer/1392-trusting-period-default.md +++ /dev/null @@ -1,2 +0,0 @@ -- Set default trusting period to be 2/3 of unbonding period for Cosmos chains - ([#1392](https://github.com/informalsystems/ibc-rs/issues/1392)) \ No newline at end of file diff --git a/.changelog/v0.7.3/summary.md b/.changelog/v0.7.3/summary.md deleted file mode 100644 index 0f3643fc99..0000000000 --- a/.changelog/v0.7.3/summary.md +++ /dev/null @@ -1,4 +0,0 @@ -This minor release most notably includes a fix for a bug introduced in v0.7.0 -where Hermes would always use the max gas when submitting transactions to -chains based on Cosmos SDK <= 0.42. -It also improves the handling of account sequence numbers diff --git a/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md b/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md deleted file mode 100644 index 42080cdefc..0000000000 --- a/.changelog/v0.8.0-pre.1/breaking-changes/ibc/1214-ics07.md +++ /dev/null @@ -1,3 +0,0 @@ -- The `check_header_and_update_state` method of the `ClientDef` - trait (ICS02) has been expanded to facilitate ICS07 - ([#1214](https://github.com/informalsystems/ibc-rs/issues/1214)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/1433-memo-field.md b/.changelog/v0.8.0-pre.1/features/1433-memo-field.md deleted file mode 100644 index c25e653426..0000000000 --- a/.changelog/v0.8.0-pre.1/features/1433-memo-field.md +++ /dev/null @@ -1,3 +0,0 @@ -- Add support for the `tx.memo` field ([#1433]) - -[#1433]: https://github.com/informalsystems/ibc-rs/issues/1433 diff --git a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md 
b/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md deleted file mode 100644 index 94b731220e..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1457-default-gas.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add a `default_gas` setting to be used for submitting a tx when tx simulation - fails ([#1457](https://github.com/informalsystems/ibc-rs/issues/1457)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md b/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md deleted file mode 100644 index 69a75ed2ad..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc-relayer/1464-ibc-go-check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update compatibility check for IBC-Go dependency - ([#1464](https://github.com/informalsystems/ibc-rs/issues/1464)) \ No newline at end of file diff --git a/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md b/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md deleted file mode 100644 index 460b04b45a..0000000000 --- a/.changelog/v0.8.0-pre.1/features/ibc/1214-ics07.md +++ /dev/null @@ -1,2 +0,0 @@ -- Add ICS07 verification functionality by using `tendermint-light-client` - ([#1214](https://github.com/informalsystems/ibc-rs/issues/1214)) diff --git a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md b/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md deleted file mode 100644 index 4f8f5af6a5..0000000000 --- a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1231-begin-end-block-events.md +++ /dev/null @@ -1,2 +0,0 @@ -- Handle SendPacket events originating from Tendermint ABCI's BeginBlock - and EndBlock methods ([#1231](https://github.com/informalsystems/ibc-rs/issues/1231)) diff --git a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md b/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md deleted file mode 100644 index 
eb6c3d8170..0000000000 --- a/.changelog/v0.8.0-pre.1/improvements/ibc-relayer/1440-improve-error-msg-create-client.md +++ /dev/null @@ -1,3 +0,0 @@ -- Improve error message when `create client` fails and add a health - check for the trusting period being smaller than the unbonding period - ([#1440](https://github.com/informalsystems/ibc-rs/issues/1440)) diff --git a/.changelog/v0.8.0-pre.1/summary.md b/.changelog/v0.8.0-pre.1/summary.md deleted file mode 100644 index 8ba6721527..0000000000 --- a/.changelog/v0.8.0-pre.1/summary.md +++ /dev/null @@ -1,10 +0,0 @@ -This is a pre-release which depends on forks of various Rust libraries. -As such, it is advised to avoid depending on the `ibc` and `ibc-relayer` crates -at version 0.8.0-pre.1. - -However, Hermes v0.8.0-pre.1 is considered stable and it is recommended for all -users to update to this version. - -This release notably includes a new [`memo_prefix`][memo] configuration option -for specifying a prefix to be include in the memo of each transaction submitted -by Hermes. diff --git a/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md b/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md deleted file mode 100644 index 3fec5b405c..0000000000 --- a/.changelog/v0.8.0/breaking-changes/1519-msrv-1.56.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update MSRV to Rust 1.56 and use the 2021 edition - ([#1519](https://github.com/informalsystems/ibc-rs/issues/1519)) \ No newline at end of file diff --git a/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md b/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md deleted file mode 100644 index a92f3b9798..0000000000 --- a/.changelog/v0.8.0/bug-fixes/1445-clock-drift.md +++ /dev/null @@ -1,9 +0,0 @@ -- Fix for client state clock drift [#1445]: - * Added new config param `max_clock_drift` to prevent - the problem for appearing in newly-created clients. 
- * Added a synchronos waiting in client update logic - to allow destination chain to reach a new height - before submitting a client update message. - - -[#1445]: https://github.com/informalsystems/ibc-rs/issues/1445 diff --git a/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md b/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md deleted file mode 100644 index 66bfdde2c1..0000000000 --- a/.changelog/v0.8.0/bug-fixes/1504-timeout_check.md +++ /dev/null @@ -1,2 +0,0 @@ -- Fix for packet timeout computation - ([#1504](https://github.com/informalsystems/ibc-rs/issues/1504)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md b/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md deleted file mode 100644 index 40e88bed72..0000000000 --- a/.changelog/v0.8.0/improvements/1417-update-client-misbehavior-perf.md +++ /dev/null @@ -1,2 +0,0 @@ -- Improve performance of misbehaviour checks triggered by an `UpdateClient` - event ([#1417](https://github.com/informalsystems/ibc-rs/issues/1417)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/1502-update-prost-09.md b/.changelog/v0.8.0/improvements/1502-update-prost-09.md deleted file mode 100644 index 8ec716b587..0000000000 --- a/.changelog/v0.8.0/improvements/1502-update-prost-09.md +++ /dev/null @@ -1,2 +0,0 @@ -- Update to official releases of `prost` 0.9 and `tonic` 0.6 - ([#1502](https://github.com/informalsystems/ibc-rs/issues/1502)) \ No newline at end of file diff --git a/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md b/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md deleted file mode 100644 index 74e62cc066..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/1436-restructure-to-match-ibc-go.md +++ /dev/null @@ -1,4 +0,0 @@ -- Restructure the layout of the `ibc` crate to match `ibc-go`'s [layout][ibc-go-layout] ([#1436][issue-1436]). 
- -[issue-1436]: https://github.com/informalsystems/ibc-rs/issues/1436 -[ibc-go-layout]: https://github.com/cosmos/ibc-go#contents diff --git a/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md b/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md deleted file mode 100644 index df0743ee62..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/1460-path-fromstr.md +++ /dev/null @@ -1 +0,0 @@ -- Implement `FromStr` to enable string-encoded paths to be converted into Path identifiers ([#1460](https://github.com/informalsystems/ibc-rs/issues/1460)) diff --git a/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md b/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md deleted file mode 100644 index 502e199cf1..0000000000 --- a/.changelog/v0.8.0/improvements/ibc/838-converting-IbcEvent-into-AbciEvent.md +++ /dev/null @@ -1 +0,0 @@ -- Support for converting `ibc::events::IbcEvent` into `tendermint::abci::Event` ([#838](https://github.com/informalsystems/ibc-rs/issues/838)) diff --git a/.changelog/v0.8.0/summary.md b/.changelog/v0.8.0/summary.md deleted file mode 100644 index e90a86f670..0000000000 --- a/.changelog/v0.8.0/summary.md +++ /dev/null @@ -1,10 +0,0 @@ -This is the final release of version 0.8.0, which now depends on the official releases of the `prost` and `tonic` crates. -In addition to everything that's included in v0.8.0-pre.1, this release updates the minimum supported Rust version to 1.56, -and contains various bug fixes and performance improvements which make the relayer more reliable. - -#### Notice for operators -A new setting was added to the Hermes configuration: `max_block_time`. -This setting specifies the maximum time per block for this chain. -The block time together with the clock drift are added to the source drift to estimate -the maximum clock drift when creating a client on this chain. 
-For Cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commit` diff --git a/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md b/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md deleted file mode 100644 index a0136fcd10..0000000000 --- a/.changelog/v0.9.0/bug-fixes/ibc/1532-connOpenAck-counterparty-conn-id-not-set.md +++ /dev/null @@ -1,5 +0,0 @@ -- Set the connection counterparty in the ICS03 [`connOpenAck` handler][conn-open-ack-handler] - ([#1532](https://github.com/informalsystems/ibc-rs/issues/1532)) - -[conn-open-ack-handler]: https://github.com/informalsystems/ibc-rs/blob/master/modules/src/core/ics03_connection/handler/conn_open_ack.rs - diff --git a/.changelog/v0.9.0/features/1408-vega-protos.md b/.changelog/v0.9.0/features/1408-vega-protos.md deleted file mode 100644 index d0e97aee0f..0000000000 --- a/.changelog/v0.9.0/features/1408-vega-protos.md +++ /dev/null @@ -1,2 +0,0 @@ -- Support for compatibility with gaia Vega upgrade (protos matching ibc-go v1.2.2 and SDK v0.44.3) - ([#1408](https://github.com/informalsystems/ibc-rs/issues/1408)) diff --git a/.changelog/v0.9.0/features/1534-ibc-queries.md b/.changelog/v0.9.0/features/1534-ibc-queries.md deleted file mode 100644 index 81868ac372..0000000000 --- a/.changelog/v0.9.0/features/1534-ibc-queries.md +++ /dev/null @@ -1,2 +0,0 @@ -- Optimize the WS client to subscribe to IBC events only (instead of all Tx - events) ([#1534](https://github.com/informalsystems/ibc-rs/issues/1534)) \ No newline at end of file diff --git a/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md b/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md deleted file mode 100644 index c42e7de9b0..0000000000 --- a/.changelog/v0.9.0/features/ibc-relayer/1518-config-modes.md +++ /dev/null @@ -1,2 +0,0 @@ -- Allow for more granular control of relaying modes. The `mode` configuration section replaces the `strategy` option. 
- ([#1518](https://github.com/informalsystems/ibc-rs/issues/1518)) diff --git a/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md b/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md deleted file mode 100644 index 12679a61bf..0000000000 --- a/.changelog/v0.9.0/improvements/1544-typed-tla-mbt-specs.md +++ /dev/null @@ -1,2 +0,0 @@ -- Upgrade IBC-rs TLA+ MBT models to modern Apalache type annotations - ([#1544](https://github.com/informalsystems/ibc-rs/issues/1544)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/1556-arch-doc.md b/.changelog/v0.9.0/improvements/1556-arch-doc.md deleted file mode 100644 index 1f019e1274..0000000000 --- a/.changelog/v0.9.0/improvements/1556-arch-doc.md +++ /dev/null @@ -1,5 +0,0 @@ -- Add architecture.md doc that gives a high-level overview of the structure of the codebase. -- Add some module-level documentation - ([#1556][1556]) - -[1556]: https://github.com/informalsystems/ibc-rs/pulls/1556 diff --git a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md b/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md deleted file mode 100644 index 340a87363b..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1515-single-line-errors.md +++ /dev/null @@ -1,2 +0,0 @@ -- Output errors on a single line if ANSI output is disabled - ([#1515](https://github.com/informalsystems/ibc-rs/issues/1515)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md b/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md deleted file mode 100644 index 9acc77b50e..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer-cli/1555-fee-amount-overflow.md +++ /dev/null @@ -1,3 +0,0 @@ -- Compute fee amount using big integers to prevent overflow - when using denominations with high decimal places - ([#1555](https://github.com/informalsystems/ibc-rs/issues/1555)) \ No newline at end of file 
diff --git a/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md b/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md deleted file mode 100644 index fdf23c78d0..0000000000 --- a/.changelog/v0.9.0/improvements/ibc-relayer/1479-abort-failed-simulated-txs.md +++ /dev/null @@ -1,3 +0,0 @@ -- The relayer will now avoid submitting a tx after the simulation failed - (in all but one special case) to avoid wasting fees unnecessarily - ([#1479](https://github.com/informalsystems/ibc-rs/issues/1479)) \ No newline at end of file diff --git a/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md b/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md deleted file mode 100644 index af9048c549..0000000000 --- a/.changelog/v0.9.0/improvements/ibc/1546-add-partialeq-ibcevent.md +++ /dev/null @@ -1,2 +0,0 @@ -- Derive `PartialEq` and `Eq` on `IbcEvent` and inner types - ([#1546](https://github.com/informalsystems/ibc-rs/issues/1546)) diff --git a/.changelog/v0.9.0/summary.md b/.changelog/v0.9.0/summary.md deleted file mode 100644 index 27fb85dc71..0000000000 --- a/.changelog/v0.9.0/summary.md +++ /dev/null @@ -1,52 +0,0 @@ -*November 23rd, 2021* - -> This release honors Anca Zamfir, who has lead ibc-rs from its inception and through its first two years of life. -> The whole team is grateful for her dedication and the nurturing environment she created. -> To many more achievements, Anca!! 🥂 - -#### Notice for operators - -This release requires operators to update their Hermes configuration. -The `mode` configuration section now replaces the `strategy` option. 
- -##### `strategy = 'packets'` - -If Hermes was configured with `strategy = 'packets'`, then the configuration needs to be changed in the following way: - -```diff - [global] --strategy = 'packets' - log_level = 'trace' --clear_packets_interval = 100 --tx_confirmation = true -+ -+[mode] -+ -+[mode.clients] -+enabled = true -+refresh = true -+misbehaviour = true -+ -+[mode.connections] -+enabled = false -+ -+[mode.channels] -+enabled = false -+ -+[mode.packets] -+enabled = true -+clear_interval = 100 -+clear_on_start = true -+filter = false -+tx_confirmation = true -``` - -##### `strategy = 'all'` - -If Hermes was configured to complete connection and channel handshakes as well, ie. with `strategy = 'all'`, -then on top of the changes above, `mode.connections.enabled` and `mode.chhanels.enabled` must be set to `true`. - -[See the relevant section][config-mode-toml] of the documented `config.toml` file in the repository for more details. - -[config-mode-toml]: https://github.com/informalsystems/ibc-rs/blob/v0.9.0/config.toml#L9-L59 - diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 9f970225ad..0000000000 --- a/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -target/ \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 55403d0f2b..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,15 +0,0 @@ -# CODEOWNERS: https://help.github.com/articles/about-codeowners/ - -modules/ @hu55a1n1 @plafer @ancazamfir - -tools/ @soareschen - -docs/ @seanchen1991 @adizere - -guide/ @seanchen1991 - -relayer-cli/ @mzabaluev @romac @ancazamfir - -ci/ @mzabaluev @soareschen - -relayer/ @adizere @romac @seanchen1991 @ancazamfir \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md deleted file mode 100644 index fae32a106a..0000000000 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -name: Bug Report -about: Create a 
report to help us squash bugs! - ---- - - - -## Summary of Bug - - - -## Version - - - -## Steps to Reproduce - - - - -## Acceptance Criteria - - - -____ - -## For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate milestone (priority) applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index b2a60b2bb2..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: Feature Request -about: Create a proposal to request a feature - ---- - - - -## Summary - - - -## Problem Definition - - - -## Proposal - - - -## Acceptance Criteria - - - -____ - -#### For Admin Use - -- [ ] Not duplicate issue -- [ ] Appropriate labels applied -- [ ] Appropriate milestone (priority) applied -- [ ] Appropriate contributors tagged -- [ ] Contributor assigned/self-assigned diff --git a/.github/ISSUE_TEMPLATE/release-template.md b/.github/ISSUE_TEMPLATE/release-template.md deleted file mode 100644 index e1f3734d07..0000000000 --- a/.github/ISSUE_TEMPLATE/release-template.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: New Release Request -about: Create a proposal to track the release of a new version of IBC-RS ---- - - - - -# Release IBC-RS v.X.Y.Z - -⚡ - -- [ ] Create a new release in the changelog, using [`unclog`](https://github.com/informalsystems/unclog). - - If doing a release candidate (`rc`) version, then skip the `unclog release` step. -- [ ] Bump all crate versions to the new version. -- [ ] Reassign unfinished issues of previous milestone to the next milestone. 
-- [ ] Update Cargo.lock file (if re-publishing `ibc-relayer-cli`) diff --git a/.github/ISSUE_TEMPLATE/rust-update.md b/.github/ISSUE_TEMPLATE/rust-update.md deleted file mode 100644 index 2763d111ec..0000000000 --- a/.github/ISSUE_TEMPLATE/rust-update.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -name: Rust version update -about: A checklist to perform to update to a new version of Rust. ---- - -# Update to Rust release 1.x.y - -🦀 - -- [ ] Update the version in `rust-toolchain.toml`. -- [ ] Run `cargo clippy --all-features --fix`, review and commit the automatic - fixes, and fix all reported lints. Try to resolve the root causes for - the lints rather than silencing them with `#[allow(...)]`. - -## Update the MSRV (optional) - -Additional steps to perform in order to make the new minimal supported -Rust version: - -- [ ] Update the `rust-version` fields in all `Cargo.toml` files. -- [ ] Update the `msrv` field in `clippy.toml` and fix all lints reported by - `cargo clippy --all-features`. -- [ ] Update the MSRV shields in README files: - - `README.md` - - `relayer-rest/README.md` -- [ ] Update the MSRV in the guide: `guide/src/pre_requisites.md` -- [ ] Add a `.changelog` entry to the `breaking-changes` section, - announcing the new MSRV. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index e20e5078bd..0000000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,27 +0,0 @@ - - -Closes: #XXX - -## Description - - - - -______ - -### PR author checklist: -- [ ] Added changelog entry, using [`unclog`](https://github.com/informalsystems/unclog). -- [ ] Added tests: integration (for Hermes) or unit/mock tests (for modules). -- [ ] Linked to GitHub issue. -- [ ] Updated code comments and documentation (e.g., `docs/`). - -### Reviewer checklist: - -- [ ] Reviewed `Files changed` in the GitHub PR explorer. -- [ ] Manually tested (in case integration/unit/mock tests are absent). 
\ No newline at end of file diff --git a/.github/actions-rs/grcov.yml b/.github/actions-rs/grcov.yml deleted file mode 100644 index d70ee56d03..0000000000 --- a/.github/actions-rs/grcov.yml +++ /dev/null @@ -1,6 +0,0 @@ -branch: true -ignore-not-existing: true -llvm: true -output-type: lcov -output-file: ./lcov.info -prefix-dir: /home/user/build/ diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 981972a431..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Automatically open PRs to update outdated deps -# See https://docs.github.com/en/github/administering-a-repository/enabling-and-disabling-version-updates - -version: 2 -updates: - # Enable version updates for npm - - package-ecosystem: "cargo" - # Look for Cargo `.toml` and `.lock` files in the `root` directory - directory: "/" - # Check the cargo registry for updates every day (weekdays) - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "modules" - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "proto" - schedule: - interval: "weekly" - -## Disable dependabot for `proto-compiler`. -## Rationale: we maintain the dependencies for proto-compiler -## manually, so that we update the proto-compiler binary -## (and the Cargo.lock file) only when we regenerate -## Rust types from the .proto files. 
-# - package-ecosystem: "cargo" -# directory: "proto-compiler" -# schedule: -# interval: "weekly" - - - package-ecosystem: "cargo" - directory: "relayer" - schedule: - interval: "weekly" - - - package-ecosystem: "cargo" - directory: "relayer-cli" - schedule: - interval: "weekly" diff --git a/.github/markdown-link-check.json b/.github/markdown-link-check.json deleted file mode 100644 index 840f1615ef..0000000000 --- a/.github/markdown-link-check.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https://crates.io" - }, - { - "pattern": "^https?://localhost" - } - ], - "aliveStatusCodes": [429, 200] -} diff --git a/.github/workflows/audit.yaml b/.github/workflows/audit.yaml deleted file mode 100644 index 4ca65ae9f7..0000000000 --- a/.github/workflows/audit.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: Security Audit -on: - schedule: - - cron: '0 0 * * *' - -jobs: - security_audit: - name: Security Audit - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Cache cargo bin - uses: actions/cache@v1 - with: - path: ~/.cargo/bin - key: ${{ runner.os }}-cargo-audit-v0.11.2 - - uses: actions-rs/audit-check@v1.2.0 - with: - token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cargo-doc.yaml b/.github/workflows/cargo-doc.yaml deleted file mode 100644 index 1d2a83f83b..0000000000 --- a/.github/workflows/cargo-doc.yaml +++ /dev/null @@ -1,27 +0,0 @@ -name: Publish Cargo Doc - -on: - push: - branches: - - master - pull_request: {} - -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - - - run: cargo doc - - - name: Deploy - if: github.ref == 'refs/heads/master' - uses: peaceiris/actions-gh-pages@v3 - with: - deploy_key: ${{ secrets.IBC_RS_DOC_PRIVATE_KEY }} - external_repository: informalsystems/ibc-rs-doc - publish_dir: ./target/doc diff --git a/.github/workflows/e2e-gaia-current-release.yaml 
b/.github/workflows/e2e-gaia-current-release.yaml deleted file mode 100644 index 398e1c30c9..0000000000 --- a/.github/workflows/e2e-gaia-current-release.yaml +++ /dev/null @@ -1,60 +0,0 @@ -name: End to End testing (Gaia - v7.0.1) -on: - pull_request: - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - push: - branches: master - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - test-end-to-end-current-gaia: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: build - args: --workspace - - run: cp ./target/debug/hermes . 
- - name: Build relayer image - run: docker-compose -f ci/docker-compose-gaia-current.yml build relayer - - name: Start chains and relayer - run: docker-compose -f ci/docker-compose-gaia-current.yml up -d ibc-0 ibc-1 relayer - - name: Run relayer workflow - continue-on-error: false - run: docker exec relayer /bin/sh -c /relayer/e2e.sh diff --git a/.github/workflows/e2e-gaia-legacy-release.yaml b/.github/workflows/e2e-gaia-legacy-release.yaml deleted file mode 100644 index f128d65e57..0000000000 --- a/.github/workflows/e2e-gaia-legacy-release.yaml +++ /dev/null @@ -1,59 +0,0 @@ -name: End to End testing (Gaia - v6.0.0) -on: - pull_request: - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - push: - branches: master - paths: - - .github/workflows/e2e.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - test-end-to-end-current-gaia: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: build - args: --workspace - - run: cp ./target/debug/hermes . 
- - name: Build relayer image - run: docker-compose -f ci/docker-compose-gaia-legacy.yml build relayer - - name: Start chains and relayer - run: docker-compose -f ci/docker-compose-gaia-legacy.yml up -d ibc-0 ibc-1 relayer - - name: Run relayer workflow - run: docker exec relayer /bin/sh -c /relayer/e2e.sh diff --git a/.github/workflows/guide.yml b/.github/workflows/guide.yml deleted file mode 100644 index 3ee6996edc..0000000000 --- a/.github/workflows/guide.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Hermes Guide - -on: - push: - branches: - - master - tags: - - v[0-9]+.* - pull_request: - paths: - - guide/** - -jobs: - guide: - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - - - name: cache .cargo directory - uses: actions/cache@v1 - with: - path: ~/.cargo - key: cargo-dir - restore-keys: | - cargo-dir - - - name: Install mdbook - uses: actions-rs/install@v0.1 - with: - crate: mdbook - version: latest - use-tool-cache: true - - - name: Install mdbook-mermaid - uses: actions-rs/install@v0.1 - with: - crate: mdbook-mermaid - version: latest - use-tool-cache: true - - - name: Install mdbook-toc - uses: actions-rs/install@v0.1 - with: - crate: mdbook-toc - version: latest - use-tool-cache: true - - - name: Build guide - run: | - cd guide - mdbook build - - # Only deploy guide when releasing a new version of Hermes - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v3 - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - publish_dir: ./guide/book diff --git a/.github/workflows/integration.yaml b/.github/workflows/integration.yaml deleted file mode 100644 index c0b92677b9..0000000000 --- a/.github/workflows/integration.yaml +++ /dev/null @@ -1,214 +0,0 @@ -name: Rust -on: - pull_request: - paths: - - .github/workflows/integration.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - 
- telemetry/** - - tools/** - push: - branches: master - paths: - - .github/workflows/integration.yaml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - integration-test: - runs-on: ubuntu-latest - strategy: - matrix: - gaiad: - - gaia5 - - gaia6 - - gaia7 - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - run: | - nix shell .#python .#${{ matrix.gaiad }} -c cargo \ - test -p ibc-integration-test --no-fail-fast -- \ - --nocapture --test-threads=2 - - ibc-go-integration-test: - runs-on: ubuntu-latest - strategy: - matrix: - simapp: - - ibc-go-v2-simapp - - ibc-go-v3-simapp - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true 
- - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - CHAIN_COMMAND_PATH: simd - run: | - nix shell .#python .#${{ matrix.simapp }} -c cargo \ - test -p ibc-integration-test --no-fail-fast -- --nocapture --test-threads=2 - - ordered-channel-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - run: | - nix shell .#python .#gaia6-ordered -c cargo \ - test -p ibc-integration-test --features ordered --no-fail-fast -- \ - --nocapture --test-threads=1 test_ordered_channel - - ica-filter-test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --no-fail-fast --no-run - - env: - RUST_LOG: info - 
RUST_BACKTRACE: 1 - NO_COLOR_LOG: 1 - CHAIN_COMMAND_PATH: icad - run: | - nix shell .#python .#ica -c cargo \ - test -p ibc-integration-test --features ica --no-fail-fast -- \ - --nocapture --test-threads=1 test_ica_filter - - model-based-test: - runs-on: ubuntu-latest - strategy: - matrix: - gaiad: - - gaia6 - steps: - - uses: actions/checkout@v2 - - uses: cachix/install-nix-action@v15 - with: - install_url: https://nixos-nix-install-tests.cachix.org/serve/vij683ly7sl95nnhb67bdjjfabclr85m/install - install_options: '--tarball-url-prefix https://nixos-nix-install-tests.cachix.org/serve' - extra_nix_config: | - experimental-features = nix-command flakes - - uses: cachix/cachix-action@v10 - with: - name: cosmos - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: -p ibc-integration-test --features mbt --no-fail-fast --no-run - # Disable running MBT tests until flakiness is addressed - # - env: - # RUST_LOG: debug - # RUST_BACKTRACE: 1 - # NO_COLOR_LOG: 1 - # run: | - # nix shell \ - # .#${{ matrix.gaiad }} \ - # .#apalache \ - # -c cargo \ - # test -p ibc-integration-test --features mbt --no-fail-fast -- \ - # --nocapture --test-threads=1 mbt diff --git a/.github/workflows/markdown-link-check.yml b/.github/workflows/markdown-link-check.yml deleted file mode 100644 index c699a85514..0000000000 --- a/.github/workflows/markdown-link-check.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Check Markdown links - -on: - schedule: - - cron: '0 0 * * *' - - workflow_dispatch: - -jobs: - md-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: markdown-link-check - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 - with: - config-file: '.github/markdown-link-check.json' - use-quiet-mode: 'yes' - use-verbose-mode: 'yes' diff --git a/.github/workflows/no-std.yaml b/.github/workflows/no-std.yaml deleted file mode 100644 
index d9020d1de1..0000000000 --- a/.github/workflows/no-std.yaml +++ /dev/null @@ -1,47 +0,0 @@ -name: no_std check -on: - pull_request: - paths: - - .github/workflows/no-std.yml - - Cargo.toml - - Cargo.lock - - ci/** - - proto/** - - modules/** - push: - branches: master - paths: - - .github/workflows/no-std.yml - - Cargo.toml - - Cargo.lock - - ci/** - - proto/** - - modules/** - -jobs: - check-no-std-panic-conflict: - name: Check no_std panic conflict - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - run: | - cd ci/no-std-check - make check-panic-conflict - - check-substrate: - name: Check no_std substrate support - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: nightly - target: wasm32-unknown-unknown - override: true - - run: | - cd ci/no-std-check - make check-substrate diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index fb64ad8395..0000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,80 +0,0 @@ -# Uploads Hermes binary. 
-# Ref: https://github.com/marketplace/actions/build-and-upload-rust-binary-to-github-releases - -name: Release - -on: - push: - tags: - - v[0-9]+.* - -jobs: - create-release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: taiki-e/create-gh-release-action@v1 - env: - # (required) - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - upload-assets: - strategy: - matrix: - config: - - { os: ubuntu-latest, target: x86_64-unknown-linux-gnu } - - { os: ubuntu-latest, target: aarch64-unknown-linux-gnu } - - { os: macos-latest, target: x86_64-apple-darwin } - runs-on: ${{ matrix.config.os }} - steps: - - uses: actions/checkout@v2 - - uses: taiki-e/upload-rust-binary-action@v1 - with: - # (required) - bin: hermes - # (optional) Target triple - target: ${{ matrix.config.target }} - # (optional) On which platform to distribute the `.tar.gz` file. - # [default value: unix] - # [possible values: all, unix, windows, none] - tar: unix - # (optional) On which platform to distribute the `.zip` file. - # [default value: windows] - # [possible values: all, unix, windows, none] - zip: unix - # (optional) Archive name (non-extension portion of filename) to be uploaded. - # [default value: $bin-$target] - # [possible values: the following variables and any string] - # variables: - # - $bin - Binary name (non-extension portion of filename). - # - $target - Target triple. - # - $tag - Tag of this release. 
- archive: $bin-$tag-$target - env: - # (required) - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - # (optional) - CARGO_PROFILE_RELEASE_LTO: true - docker-release: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - name: Set up Docker Buildx - id: buildx - uses: docker/setup-buildx-action@v1 - - name: Login to Docker Hub - uses: docker/login-action@v1 - with: - username: ${{ secrets.DOCKER_HUB_USERNAME }} - password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} - - name: Get release version - run: echo "TAG=${GITHUB_REF#refs/tags/v}" >> $GITHUB_ENV - - name: Build and push - id: docker_build - uses: docker/build-push-action@v2 - with: - context: ./ci/ - file: ./ci/hermes.Dockerfile - push: true - build-args: TAG=v${{env.TAG}} - tags: informalsystems/hermes:${{env.TAG}} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml deleted file mode 100644 index aa4d85f717..0000000000 --- a/.github/workflows/rust.yml +++ /dev/null @@ -1,139 +0,0 @@ -name: Rust -on: - pull_request: - paths: - - .github/workflows/rust.yml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - push: - branches: master - paths: - - .github/workflows/rust.yml - - Cargo.toml - - Cargo.lock - - ci/** - - e2e/** - - proto/** - - modules/** - - relayer/** - - relayer-cli/** - - relayer-rest/** - - telemetry/** - - tools/** - -env: - CARGO_INCREMENTAL: 0 - CARGO_PROFILE_DEV_DEBUG: 1 - CARGO_PROFILE_RELEASE_DEBUG: 1 - RUST_BACKTRACE: short - CARGO_NET_RETRY: 10 - RUSTUP_MAX_RETRIES: 10 - -jobs: - cleanup-runs: - runs-on: ubuntu-latest - steps: - - uses: rokroskar/workflow-run-cleanup-action@master - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" - - fmt: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - 
override: true - - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check - - clippy_all_features: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: clippy - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --all-features --all-targets - - clippy_no_default_features: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - components: clippy - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --no-default-features --all-targets - - test-stable: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: actions-rs/toolchain@v1 - with: - toolchain: stable - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --no-fail-fast --no-run - - uses: actions-rs/cargo@v1 - with: - command: test - args: --all-features --no-fail-fast --workspace --exclude ibc-integration-test -- --nocapture - - # test-coverage: - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v2 - # with: - # fetch-depth: 0 - # - uses: actions-rs/toolchain@v1 - # with: - # toolchain: stable - # override: true - # - uses: actions-rs/cargo@v1 - # with: - # command: clean - # - name: Run cargo-tarpaulin - # uses: actions-rs/tarpaulin@v0.1 - # with: - # version: '0.16.0' - # args: '-- --test-threads 1' - # timeout: 600 - # - name: Upload coverage to Codecov - # uses: codecov/codecov-action@v1 - # with: - # token: ${{ secrets.CODECOV_TOKEN }} - # fail_ci_if_error: true - # - name: Archive code coverage results - # uses: actions/upload-artifact@v2 - # with: - # name: code-coverage-report - # path: cobertura.xml diff --git a/.github/workflows/scripts.yaml 
b/.github/workflows/scripts.yaml deleted file mode 100644 index bf5e48778d..0000000000 --- a/.github/workflows/scripts.yaml +++ /dev/null @@ -1,19 +0,0 @@ -name: ShellCheck Scripts -on: - pull_request: - paths: - - scripts/** - push: - branches: master - paths: - - scripts/** - -jobs: - shellcheck: - runs-on: ubuntu-latest - steps: - - name: Run ShellCheck - uses: ludeeus/action-shellcheck@master - with: - scandir: './scripts' - additional_files: 'scripts/dev-env scripts/one-chain scripts/init-hermes' diff --git a/.github/workflows/specs.yml b/.github/workflows/specs.yml deleted file mode 100644 index acaa60ca77..0000000000 --- a/.github/workflows/specs.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: TLA+ Specs -on: - pull_request: - paths: - - docs/spec/tla/** - push: - branches: master - paths: - - docs/specs/tla/** - -jobs: - typecheck-specs: - runs-on: ubuntu-latest - container: apalache/mc:0.15.2 - env: - working-directory: docs/spec/tla - steps: - - uses: actions/checkout@v2 - - name: IBC Core - run: apalache-mc typecheck IBCCore.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/ibc-core - - name: Fungible Token Transfer - run: apalache-mc typecheck IBCTokenTransfer.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/fungible-token-transfer - - name: ICS 02 Client / Single Chain - run: apalache-mc typecheck ICS02SingleChainEnvironment.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client - - name: ICS 02 Client / Two Chains - run: apalache-mc typecheck ICS02TwoChainsEnvironment.tla | grep -F 'Type checker [OK]' - working-directory: ${{env.working-directory}}/client diff --git a/.gitignore b/.gitignore index 2e9b0e8816..13a9c9349b 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,9 @@ # will have compiled files and executables target/ +# Ignore cargo patch +.cargo + # These are backup files generated by rustfmt **/*.rs.bk diff --git a/.rustfmt.toml b/.rustfmt.toml 
index 8c83f08221..8b3be5e7d9 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,9 +1,23 @@ +# Basic +hard_tabs = true max_width = 100 +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" reorder_imports = true - -# nightly only - -# unstable_features = true -# format_strings = false -# comment_width = 100 -# wrap_comments = true +# Consistency +newline_style = "Unix" +# Format comments +comment_width = 100 +wrap_comments = true +# Misc +binop_separator = "Back" +chain_width = 80 +match_arm_blocks = false +match_arm_leading_pipes = "Preserve" +match_block_trailing_comma = true +reorder_impl_items = false +spaces_around_ranges = false +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true diff --git a/Cargo.lock b/Cargo.lock index fc8906a7c2..92084e53a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,51 +3,13 @@ version = 3 [[package]] -name = "abscissa_core" -version = "0.6.0" +name = "Inflector" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6750843603bf31a83accd3c8177f9dbf53a7d64275688fc7371e0a4d9f8628b5" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" dependencies = [ - "abscissa_derive", - "arc-swap", - "backtrace", - "canonical-path", - "clap", - "color-eyre", - "fs-err", - "once_cell", + "lazy_static", "regex", - "secrecy", - "semver", - "serde", - "termcolor", - "toml", - "tracing", - "tracing-log", - "tracing-subscriber", - "wait-timeout", -] - -[[package]] -name = "abscissa_derive" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3473aa652e90865a06b723102aaa4a54a7d9f2092dbf4582497a61d0537d3f" -dependencies = [ - "ident_case", - "proc-macro2", - "quote", - "syn", - "synstructure", -] - -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", ] [[package]] @@ -57,10 +19,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] -name = "adler32" -version = "1.2.0" +name = "ahash" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +dependencies = [ + "getrandom 0.2.7", + "once_cell", + "version_check", +] [[package]] name = "aho-corasick" @@ -72,18 +39,12 @@ dependencies = [ ] [[package]] -name = "alloc-no-stdlib" -version = "2.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.1" +name = "android_system_properties" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +checksum = "d7ed72e1635e121ca3e79420540282af22da58be50de153d36f81ddc6b83aa9e" dependencies = [ - "alloc-no-stdlib", + "libc", ] [[package]] @@ -97,21 +58,65 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.57" +version = "1.0.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" +checksum = "1485d4d2cc45e7b201ee3767015c96faa5904387c9d87c6efdd0fb511f12d305" [[package]] -name = "arc-swap" -version = "1.5.0" +name = "argh" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "a7e7e4aa7e40747e023c0761dafcb42333a9517575bbf1241747f68dd3177a62" +dependencies = [ + "argh_derive", + 
"argh_shared", +] [[package]] -name = "ascii" -version = "1.0.0" +name = "argh_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f2bd7ff6ed6414f4e5521bd509bae46454bbd513801767ced3f21a751ab4bc" +dependencies = [ + "argh_shared", + "heck 0.3.3", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "argh_shared" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47253b98986dafc7a3e1cf3259194f1f47ac61abb57a57f46ec09e48d004ecda" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "arrayvec" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf56136a5198c7b01a49e3afcbef6cf84597273d298f54432926024107b0109" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "async-stream" @@ -136,31 +141,15 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "async-tungstenite" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e00550829ef8e2c4115250d0ee43305649b0fa95f78a32ce5b07da0b73d95c5c" -dependencies = [ - "futures-io", - "futures-util", - "log", - "pin-project-lite", - "tokio", - "tokio-rustls 0.22.0", - "tungstenite", - "webpki-roots 0.21.1", -] - [[package]] name = "atty" version = "0.2.14" @@ -180,9 +169,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.6" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2504b827a8bef941ba3dd64bdffe9cf56ca182908a147edd6189c95fbcae7d" +checksum = "9de18bc5f2e9df8f52da03856bf40e29b747de5a84e43aefff90e3dc4a21529b" dependencies = [ "async-trait", "axum-core", @@ -192,7 +181,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa", + "itoa 1.0.3", "matchit", "memchr", "mime", @@ -209,9 +198,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.4" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da31c0ed7b4690e2c78fe4b880d21cd7db04a346ebc658b4270251b695437f17" +checksum = "e4f44a0e6200e9d11a1cdc989e4b358f6e3d354fbf48478f345a17f4e43f8635" dependencies = [ "async-trait", "bytes", @@ -222,25 +211,10 @@ dependencies = [ ] [[package]] -name = "backtrace" -version = "0.3.65" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11a17d453482a265fd5f8479f2a3f405566e6ca627837aaddb85af8b1ab8ef61" -dependencies = [ - "addr2line", - "cc", - "cfg-if 1.0.0", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base16ct" -version = "0.1.1" +name = "base58" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" @@ -249,49 +223,53 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] -name = "base64ct" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea908e7347a8c64e378c17e30ef880ad73e3b4498346b055c2c00ea342f3179" - -[[package]] -name = "bech32" -version = "0.8.1" +name = "bitflags" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9ff0bbfd639f15c74af777d81383cf53efb7c93613f6cab67c6c11e05bbf8b" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "bech32" -version = "0.9.0" +name = "bitvec" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5738be7561b0eeb501ef1d5c5db3f24e01ceb55fededd9b00039aada34966ad" +checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" +dependencies = [ + "funty", + "radium", + "tap", + "wyz", +] [[package]] -name = "bitcoin" -version = "0.28.1" +name = "blake2" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05bba324e6baf655b882df672453dbbc527bc938cadd27750ae510aaccc3a66a" +checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" dependencies = [ - "bech32 0.8.1", - "bitcoin_hashes", - "secp256k1", - "serde", + "digest 0.10.3", ] [[package]] -name = "bitcoin_hashes" -version = "0.10.0" +name = "blake2-rfc" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006cc91e1a1d99819bc5b8214be3555c1f0611b169f527a1fdc54ed1f2b745b0" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" dependencies = [ - "serde", + "arrayvec 0.4.12", + "constant_time_eq", ] [[package]] -name = "bitflags" -version = "1.3.2" +name = "block-buffer" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 
+checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding 0.1.5", + "byte-tools", + "byteorder", + "generic-array 0.12.4", +] [[package]] name = "block-buffer" @@ -299,8 +277,8 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding", - "generic-array", + "block-padding 0.2.1", + "generic-array 0.14.6", ] [[package]] @@ -309,7 +287,16 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" dependencies = [ - "generic-array", + "generic-array 0.14.6", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", ] [[package]] @@ -319,116 +306,81 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" [[package]] -name = "brotli" -version = "3.3.4" +name = "borsh" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", + "borsh-derive", + "hashbrown 0.11.2", ] [[package]] -name = "brotli-decompressor" -version = "2.3.2" +name = "borsh-derive" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", + 
"borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", ] [[package]] -name = "buf_redux" -version = "0.8.4" +name = "borsh-derive-internal" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" dependencies = [ - "memchr", - "safemem", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "bytecount" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72feb31ffc86498dacdbd0fcebb56138e7177a8cc5cea4516031d15ae85a742e" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "bzip2" -version = "0.4.3" +name = "borsh-schema-derive-internal" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6afcd980b5f3a45017c57e57a2fcccbb351cc43a356ce117ef760ef8052b89b0" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" dependencies = [ - "bzip2-sys", - "libc", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" +name = "bumpalo" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - 
"pkg-config", -] +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] -name = "camino" -version = "1.0.9" +name = "byte-slice-cast" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "869119e97797867fd90f5e22af7d0bd274bd4635ebb9eb68c04f3f513ae6c412" -dependencies = [ - "serde", -] +checksum = "87c5fdd0166095e1d463fc6cc01aa8ce547ad77a4e84d42eb6762b084e28067e" [[package]] -name = "canonical-path" -version = "2.0.2" +name = "byte-tools" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e9e01327e6c86e92ec72b1c798d4a94810f147209bbe3ffab6a86954937a6f" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] -name = "cargo-platform" -version = "0.1.2" +name = "byteorder" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" -dependencies = [ - "serde", -] +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] -name = "cargo_metadata" -version = "0.14.2" +name = "bytes" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" dependencies = [ - "camino", - "cargo-platform", - "semver", "serde", - "serde_json", ] [[package]] @@ -436,12 +388,9 @@ name = "cc" version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" - -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +dependencies = [ + "jobserver", +] [[package]] name = "cfg-if" @@ -451,11 
+400,11 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" dependencies = [ - "libc", + "iana-time-zone", "num-integer", "num-traits", "winapi", @@ -469,16 +418,16 @@ checksum = "fff857943da45f546682664a79488be82e69e43c1a7a2307679ab9afb3a66d2e" [[package]] name = "clap" -version = "3.1.18" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2dbdf4bdacb33466e854ce889eee8dfd5729abf7ccd7664d0a2d60cd384440b" +checksum = "29e724a68d9319343bb3328c9cc2dfde263f4b3142ee1059a9980580171c954b" dependencies = [ "atty", "bitflags", "clap_derive", "clap_lex", "indexmap", - "lazy_static", + "once_cell", "strsim", "termcolor", "textwrap", @@ -486,20 +435,20 @@ dependencies = [ [[package]] name = "clap_complete" -version = "3.1.4" +version = "3.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da92e6facd8d73c22745a5d3cbb59bdf8e46e3235c923e516527d8e81eec14a4" +checksum = "e4179da71abd56c26b54dd0c248cc081c1f43b0a1a7e8448e28e57a29baa993d" dependencies = [ "clap", ] [[package]] name = "clap_derive" -version = "3.1.18" +version = "3.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25320346e922cffe59c0bbc5410c8d8784509efb321488971081313cb1e1a33c" +checksum = "13547f7012c01ab4a0e8f8967730ada8f9fdf419e8b6c792788f39cf4e46eefa" dependencies = [ - "heck", + "heck 0.4.0", "proc-macro-error", "proc-macro2", "quote", @@ -508,80 +457,35 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a37c35f1112dad5e6e0b1adaff798507497a18fceeb30cceb3bae7d1427b9213" 
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" dependencies = [ "os_str_bytes", ] [[package]] -name = "color-eyre" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" -dependencies = [ - "backtrace", - "color-spantrace", - "eyre", - "indenter", - "once_cell", - "owo-colors", - "tracing-error", -] - -[[package]] -name = "color-spantrace" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" -dependencies = [ - "once_cell", - "owo-colors", - "tracing-core", - "tracing-error", -] - -[[package]] -name = "console" -version = "0.15.0" +name = "cmake" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28b32d32ca44b70c3e4acd7db1babf555fa026e385fb95f18028f88848b3c31" +checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" dependencies = [ - "encode_unicode", - "libc", - "once_cell", - "regex", - "terminal_size", - "unicode-width", - "winapi", + "cc", ] [[package]] -name = "const-oid" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c78c047431fee22c1a7bb92e00ad095a02a983affe4d8a72e2a2c62c1b94f3" - -[[package]] -name = "contracts" -version = "0.6.3" +name = "constant_time_eq" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d1429e3bd78171c65aa010eabcdf8f863ba3254728dbfb0ad4b1545beac15c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] -name = "core-foundation" -version = "0.9.3" +name = "convert_case" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" dependencies = [ - "core-foundation-sys", - "libc", + "unicode-segmentation", ] [[package]] @@ -592,9 +496,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "dc948ebb96241bb40ab73effeb80d9f93afaad49359d159a5e61be51619fe813" dependencies = [ "libc", ] @@ -605,115 +509,67 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-epoch 0.9.8", - "crossbeam-utils 0.8.8", + "cfg-if", + "crossbeam-epoch", + 
"crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.8.2" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" +checksum = "045ebe27666471bb549370b4b0b3e51b07f56325befa4284db65fc89c02511b1" dependencies = [ "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset 0.5.6", + "cfg-if", + "crossbeam-utils", + "memoffset", + "once_cell", "scopeguard", ] [[package]] -name = "crossbeam-epoch" -version = "0.9.8" +name = "crossbeam-utils" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "51887d4adc7b564537b15adcfb307936f8075dfcd5f00dde9a9f1d29383682bc" dependencies = [ - "autocfg", - "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", - "lazy_static", - "memoffset 0.6.5", - "scopeguard", + "cfg-if", + "once_cell", ] [[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", -] - -[[package]] -name = "crossbeam-utils" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" -dependencies = [ - "cfg-if 1.0.0", - "lazy_static", -] - -[[package]] -name = "crunchy" -version = "0.2.2" +name = "crunchy" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" -[[package]] -name = "crypto-bigint" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c6a1d5fa1de37e071642dfa44ec552ca5b299adb128fab16138e24b548fd21" -dependencies = [ - 
"generic-array", - "rand_core 0.6.3", - "subtle", - "zeroize", -] - [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array", + "generic-array 0.14.6", "typenum", ] @@ -723,7 +579,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array", + "generic-array 0.14.6", "subtle", ] @@ -733,17 +589,21 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array", + "generic-array 0.14.6", "subtle", ] [[package]] -name = "ct-logs" -version = "0.8.0" +name = "curve25519-dalek" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1a816186fa68d9e426e3cb4ae4dff1fcd8e4a2c34b781bf7a822574a0d0aac8" +checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" dependencies = [ - "sct 0.6.1", + "byteorder", + "digest 0.8.1", + "rand_core 0.5.1", + "subtle", + "zeroize", ] [[package]] @@ -760,32 +620,16 @@ dependencies = [ ] [[package]] -name = "dashmap" -version = "4.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e77a43b28d0668df09411cb0bc9a8c2adc40f9a048afe863e05fd43251e8e39c" -dependencies = [ - "cfg-if 1.0.0", - "num_cpus", -] - -[[package]] -name = "deflate" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f95bf05dffba6e6cce8dfbb30def788154949ccd9aed761b472119c21e01c70" -dependencies = [ - "adler32", - "gzip-header", -] - -[[package]] -name = "der" -version = "0.5.1" +name = 
"curve25519-dalek-ng" +version = "4.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6919815d73839e7ad218de758883aae3a257ba6759ce7a9992501efbb53d705c" +checksum = "1c359b7249347e46fb28804470d071c921156ad62b3eef5d34e2ba867533dec8" dependencies = [ - "const-oid", + "byteorder", + "digest 0.9.0", + "rand_core 0.6.3", + "subtle-ng", + "zeroize", ] [[package]] @@ -800,14 +644,12 @@ dependencies = [ ] [[package]] -name = "dialoguer" -version = "0.10.1" +name = "digest" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8c8ae48e400addc32a8710c8d62d55cb84249a7d58ac4cd959daecfbaddc545" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "console", - "tempfile", - "zeroize", + "generic-array 0.12.4", ] [[package]] @@ -816,7 +658,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array", + "generic-array 0.14.6", ] [[package]] @@ -827,6 +669,7 @@ checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" dependencies = [ "block-buffer 0.10.2", "crypto-common", + "subtle", ] [[package]] @@ -838,16 +681,6 @@ dependencies = [ "dirs-sys", ] -[[package]] -name = "dirs-next" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" -dependencies = [ - "cfg-if 1.0.0", - "dirs-sys-next", -] - [[package]] name = "dirs-sys" version = "0.3.7" @@ -860,34 +693,38 @@ dependencies = [ ] [[package]] -name = "dirs-sys-next" -version = "0.1.2" +name = "downcast-rs" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] +checksum = 
"9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] -name = "dyn-clone" -version = "1.0.5" +name = "dyn-clonable" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e50f3adc76d6a43f5ed73b698a87d0760ca74617f60f7c3b879003536fdd28" +checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" +dependencies = [ + "dyn-clonable-impl", + "dyn-clone", +] [[package]] -name = "ecdsa" -version = "0.13.4" +name = "dyn-clonable-impl" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0d69ae62e0ce582d56380743515fefaf1a8c70cec685d9677636d7e30ae9dc9" +checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "der", - "elliptic-curve", - "rfc6979", - "signature", + "proc-macro2", + "quote", + "syn", ] +[[package]] +name = "dyn-clone" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" + [[package]] name = "ed25519" version = "1.5.2" @@ -898,46 +735,37 @@ dependencies = [ ] [[package]] -name = "ed25519-dalek" -version = "1.0.1" +name = "ed25519-consensus" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" +checksum = "758e2a0cd8a6cdf483e1d369e7d081647e00b88d8953e34d8f2cbba05ae28368" dependencies = [ - "curve25519-dalek", - "ed25519", + "curve25519-dalek-ng", + "hex", + "rand_core 0.6.3", "sha2 0.9.9", "zeroize", ] [[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "elliptic-curve" -version = "0.11.12" +name = "ed25519-dalek" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"25b477563c2bfed38a3b7a60964c49e058b2510ad3f12ba3483fd8f62c2306d6" +checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" dependencies = [ - "base16ct", - "crypto-bigint", - "der", - "ff", - "generic-array", - "group", - "rand_core 0.6.3", - "sec1", - "subtle", + "curve25519-dalek 3.2.0", + "ed25519", + "rand 0.7.3", + "serde", + "sha2 0.9.9", "zeroize", ] [[package]] -name = "encode_unicode" -version = "0.3.6" +name = "either" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "env_logger" @@ -953,13 +781,10 @@ dependencies = [ ] [[package]] -name = "error-chain" -version = "0.12.4" +name = "environmental" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" -dependencies = [ - "version_check", -] +checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] name = "eyre" @@ -971,46 +796,46 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] [[package]] -name = "ff" -version = "0.11.1" +name = "fixed-hash" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "131655483be284720a17d74ff97592b8e76576dc25563148601df2d7c9080924" +checksum = 
"cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ - "rand_core 0.6.3", - "subtle", + "byteorder", + "rand 0.8.5", + "rustc-hex", + "static_assertions", ] [[package]] -name = "filetime" -version = "0.2.16" +name = "fixedbitset" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0408e2626025178a6a7f7ffc05a25bc47103229f19c113755de7bf63816290c" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "redox_syscall", - "winapi", -] +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39522e96686d38f4bc984b9198e3a0613264abaebaff2c5c918bfa6b6da09af" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", "miniz_oxide", ] @@ -1020,7 +845,6 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" dependencies = [ - "anyhow", "eyre", "paste", ] @@ -1042,16 +866,22 @@ dependencies = [ ] [[package]] -name = "fs-err" -version = "2.7.0" +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "funty" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd79fa345a495d3ae89fb7165fec01c0e72f41821d642dda363a1e97975652e" +checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = 
"ab30e97ab6aacfe635fad58f22c2bb06c8b685f7421eb1e064a729e2a5f481fa" dependencies = [ "futures-channel", "futures-core", @@ -1064,9 +894,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" dependencies = [ "futures-core", "futures-sink", @@ -1074,32 +904,33 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "1d11aa21b5b587a64682c0094c2bdd4df0076c5324961a40cc3abd7f37930528" dependencies = [ "futures-core", "futures-task", "futures-util", + "num_cpus", ] [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" dependencies = [ "proc-macro2", "quote", @@ -1108,21 +939,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" dependencies = [ "futures-channel", "futures-core", @@ -1138,9 +969,18 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.5" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -1152,7 +992,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -1161,74 +1001,37 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" - -[[package]] -name = "glob" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" - -[[package]] -name = "group" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5ac374b108929de78460075f3dc439fa66df9d8fc77e8f12caa5165fcf0c89" -dependencies = [ - "ff", - "rand_core 0.6.3", - "subtle", -] - -[[package]] -name = "gumdrop" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc700f989d2f6f0248546222d9b4258f5b02a171a431f8285a81c08142629e3" -dependencies = [ - "gumdrop_derive", -] - -[[package]] -name = "gumdrop_derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "gzip-header" -version = "0.3.0" +name = "git2" +version = "0.13.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0131feb3d3bb2a5a238d8a4d09f6353b7ebfdc52e77bccbf4ea6eaa751dde639" +checksum = "f29229cc1b24c0e6062f6e742aa3e256492a5323365e5ed3413599f8a5eff7d6" dependencies = [ - "crc32fast", + "bitflags", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url", ] [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -1244,49 +1047,42 @@ dependencies = [ ] [[package]] -name = "half" -version = "1.8.2" +name = "hash-db" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" [[package]] -name = "hashbrown" -version = "0.11.2" +name = "hash256-std-hasher" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] [[package]] -name = "hdpath" -version = "0.6.0" +name = "hashbrown" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72adf5a17a0952ecfcddf8d46d071271d5ee52e78443f07ba0b2dcfe3063a132" +checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" dependencies = [ - "byteorder", + "ahash", ] [[package]] -name = "headers" -version = "0.3.7" +name = "hashbrown" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" -dependencies = [ - "base64", - "bitflags", - "bytes", - "headers-core", - "http", - "httpdate", - "mime", - "sha-1 0.10.0", -] +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] -name = "headers-core" -version = "0.2.0" +name = "heck" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +checksum = 
"6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" dependencies = [ - "http", + "unicode-segmentation", ] [[package]] @@ -1332,13 +1128,13 @@ dependencies = [ [[package]] name = "http" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 1.0.3", ] [[package]] @@ -1376,21 +1172,11 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "humantime-serde" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" -dependencies = [ - "humantime", - "serde", -] - [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -1401,7 +1187,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 1.0.3", "pin-project-lite", "socket2", "tokio", @@ -1410,43 +1196,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-proxy" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca815a891b24fdfb243fa3239c86154392b0953ee584aa1a2a1f66d20cbe75cc" -dependencies = [ - "bytes", - "futures", - "headers", - "http", - "hyper", - "hyper-rustls", - "rustls-native-certs 0.5.0", - "tokio", - "tokio-rustls 0.22.0", - "tower-service", - "webpki 0.21.4", -] - -[[package]] -name = "hyper-rustls" -version = "0.22.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" -dependencies = [ - "ct-logs", - "futures-util", - "hyper", - "log", - "rustls 0.19.1", - "rustls-native-certs 0.5.0", - "tokio", - "tokio-rustls 0.22.0", - "webpki 0.21.4", - "webpki-roots 0.21.1", -] - [[package]] name = "hyper-timeout" version = "0.4.1" @@ -1459,18 +1208,34 @@ dependencies = [ "tokio-io-timeout", ] +[[package]] +name = "iana-time-zone" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad2bfd338099682614d3ee3fe0cd72e0b6a41ca6a87f6a74a3bd593c91650501" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "js-sys", + "wasm-bindgen", + "winapi", +] + [[package]] name = "ibc" version = "0.15.0" dependencies = [ - "bytes", + "borsh", "derive_more", "env_logger", "flex-error", + "ibc-derive", "ibc-proto", "ics23", - "modelator 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "modelator", "num-traits", + "parity-scale-codec", + "primitive-types", "prost", "prost-types", "safe-regex", @@ -1478,35 +1243,30 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.2", + "sha3 0.10.2", + "sp-core 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", "subtle-encoding", "tendermint", - "tendermint-light-client-verifier", "tendermint-proto", "tendermint-rpc", - "tendermint-testgen", "test-log", - "time 0.3.9", + "time", + "tokio", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.15", "uint", ] [[package]] -name = "ibc-integration-test" -version = "0.15.0" +name = "ibc-derive" +version = "0.1.0" dependencies = [ - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "ibc-test-framework", - "modelator 0.4.2 (git+https://github.com/informalsystems/modelator)", - "serde", - "serde_json", - "tempfile", - 
"tendermint", - "tendermint-rpc", - "time 0.3.9", + "convert_case", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -1524,174 +1284,22 @@ dependencies = [ ] [[package]] -name = "ibc-relayer" -version = "0.15.0" +name = "ibc-proto-compiler" +version = "0.2.0" dependencies = [ - "anyhow", - "async-stream", - "bech32 0.9.0", - "bitcoin", - "bytes", - "crossbeam-channel 0.5.4", - "dirs-next", - "env_logger", - "flex-error", - "futures", - "hdpath", - "hex", - "http", - "humantime", - "humantime-serde", - "ibc", - "ibc-proto", - "ibc-telemetry", - "itertools", - "k256", - "moka", - "nanoid", - "num-bigint", - "num-rational", - "prost", - "prost-types", - "regex", - "retry", - "ripemd160", - "semver", - "serde", - "serde_derive", - "serde_json", - "serial_test", - "sha2 0.10.2", - "signature", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tendermint-testgen", - "test-log", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "tokio", - "toml", + "argh", + "git2", + "prost-build", + "tempdir", "tonic", - "tracing", - "tracing-subscriber", - "uuid 1.1.1", -] - -[[package]] -name = "ibc-relayer-cli" -version = "0.15.0" -dependencies = [ - "abscissa_core", - "atty", - "clap", - "clap_complete", - "color-eyre", - "console", - "crossbeam-channel 0.5.4", - "dialoguer", - "dirs-next", - "eyre", - "flex-error", - "futures", - "hex", - "humantime", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-rest", - "ibc-telemetry", - "itertools", - "once_cell", - "oneline-eyre", - "regex", - "serde", - "serde_derive", - "serde_json", - "signal-hook", - "subtle-encoding", - "tendermint", - "tendermint-light-client", - "tendermint-light-client-verifier", - "tendermint-proto", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "ibc-relayer-rest" -version = "0.15.0" -dependencies = [ - "crossbeam-channel 
0.5.4", - "ibc", - "ibc-relayer", - "rouille", - "serde", - "serde_json", - "toml", - "tracing", - "ureq", -] - -[[package]] -name = "ibc-telemetry" -version = "0.15.0" -dependencies = [ - "crossbeam-channel 0.5.4", - "ibc", - "moka", - "once_cell", - "opentelemetry", - "opentelemetry-prometheus", - "prometheus", - "rouille", - "uuid 1.1.1", -] - -[[package]] -name = "ibc-test-framework" -version = "0.15.0" -dependencies = [ - "async-trait", - "color-eyre", - "crossbeam-channel 0.5.4", - "env_logger", - "eyre", - "flex-error", - "hex", - "http", - "ibc", - "ibc-proto", - "ibc-relayer", - "ibc-relayer-cli", - "itertools", - "rand 0.8.5", - "semver", - "serde", - "serde_json", - "serde_yaml", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-rpc", - "tokio", - "toml", - "tracing", - "tracing-subscriber", + "tonic-build", + "walkdir", ] [[package]] name = "ics23" version = "0.8.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a435f2471c1b2ce14771da465d47321c5905fac866d0effa9e0a3eb5d94fcf" +source = "git+https://github.com/composablefi/ics23?rev=b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927#b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927" dependencies = [ "anyhow", "bytes", @@ -1699,16 +1307,10 @@ dependencies = [ "prost", "ripemd160", "sha2 0.9.9", - "sha3", - "sp-std", + "sha3 0.9.1", + "sp-core 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.2.3" @@ -1721,28 +1323,48 @@ dependencies = [ ] [[package]] -name = "indenter" -version = "0.3.3" +name = "impl-codec" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" +checksum = 
"ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" +dependencies = [ + "parity-scale-codec", +] [[package]] -name = "indexmap" -version = "1.8.1" +name = "impl-serde" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" dependencies = [ - "autocfg", - "hashbrown", + "serde", ] [[package]] -name = "input_buffer" -version = "0.4.0" +name = "impl-trait-for-tuples" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f97967975f448f1a7ddb12b0bc41069d09ed6a1c161a92687e057325db35d413" +checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "bytes", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "indexmap" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", ] [[package]] @@ -1751,7 +1373,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1765,37 +1387,39 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.2" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] -name = "js-sys" -version = "0.3.57" +name = "itoa" +version = "1.0.3" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754" + +[[package]] +name = "jobserver" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" +checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" dependencies = [ - "wasm-bindgen", + "libc", ] [[package]] -name = "k256" -version = "0.10.4" +name = "js-sys" +version = "0.3.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19c3a5e0a0b8450278feda242592512e09f61c72e018b8cd5c859482802daf2d" +checksum = "258451ab10b34f8af53416d1fdab72c22e805f0c92a1136d59470ec0b11138b2" dependencies = [ - "cfg-if 1.0.0", - "ecdsa", - "elliptic-curve", - "sec1", - "sha2 0.9.9", + "wasm-bindgen", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" [[package]] name = "lazy_static" @@ -1805,21 +1429,101 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.132" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8371e4e5341c3a96db127eb2465ac681ced4c433e01dd0e938adbef26ba93ba5" + +[[package]] +name = "libgit2-sys" +version = "0.12.26+1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19e1c899248e606fbfe68dcb31d8b0176ebab833b103824af31bddf4b7457494" +dependencies = [ + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64", + "digest 0.9.0", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libssh2-sys" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "b094a36eb4b8b8c8a7b4b8ae43b2944502be3e59cd87687595cf6b0a71b3f4ca" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] [[package]] -name = "linked-hash-map" -version = "0.5.4" +name = "libz-sys" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "9f80bf5aacaf25cbfc8210d1cfb718f2bf3b11c4c54e5afe36c236853a8ec390" dependencies = [ "autocfg", "scopeguard", @@ -1831,16 +1535,16 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] -name = "mach" -version = "0.3.2" +name = "matchers" +version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" dependencies = [ - "libc", + "regex-automata", ] [[package]] @@ -1864,12 +1568,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.5.0" @@ -1878,20 +1576,29 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.5.6" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] [[package]] -name = "memoffset" -version = "0.6.5" +name = "memory_units" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" 
+version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" dependencies = [ - "autocfg", + "byteorder", + "keccak", + "rand_core 0.5.1", + "zeroize", ] [[package]] @@ -1900,16 +1607,6 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -1918,18 +1615,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.5.1" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b29bd4bc3f33391105ebee3589c19197c4271e3e5a9ec9bfe8127eeff8f082" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -1958,83 +1655,21 @@ dependencies = [ "tempfile", "thiserror", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.15", "ureq", ] [[package]] -name = "modelator" -version = "0.4.2" -source = "git+https://github.com/informalsystems/modelator#414fdeb192c165803639363808d88c14e6f972ad" -dependencies = [ - "clap", - "clap_complete", - "directories", - "hex", - "lazy_static", - "nom", - "once_cell", - "rayon", - "regex", - "serde", - "serde_json", - "sha2 0.10.2", - "tempfile", - "thiserror", 
- "tracing", - "tracing-subscriber", - "ureq", - "zip", -] - -[[package]] -name = "moka" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df72b50274c0988d9f4a6e808e06d9d926f265db6f8bbda1576bcaa658e72763" -dependencies = [ - "crossbeam-channel 0.5.4", - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.8.8", - "num_cpus", - "once_cell", - "parking_lot 0.12.0", - "quanta", - "scheduled-thread-pool", - "skeptic", - "smallvec", - "tagptr", - "thiserror", - "triomphe", - "uuid 0.8.2", -] - -[[package]] -name = "multipart" -version = "0.18.0" +name = "multimap" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00dec633863867f29cb39df64a397cdf4a6354708ddd7759f70c7fb51c5f9182" -dependencies = [ - "buf_redux", - "httparse", - "log", - "mime", - "mime_guess", - "quick-error", - "rand 0.8.5", - "safemem", - "tempfile", - "twoway", -] +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] -name = "nanoid" -version = "0.4.0" +name = "nodrop" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" -dependencies = [ - "rand 0.8.5", -] +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" [[package]] name = "nom" @@ -2048,14 +1683,13 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ "autocfg", "num-integer", "num-traits", - "serde", ] [[package]] @@ -2069,6 +1703,16 @@ dependencies = [ "syn", ] +[[package]] +name = "num-format" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bafe4179722c2894288ee77a9f044f02811c86af699344c498b0840c698a2465" +dependencies = [ + "arrayvec 0.4.12", + "itoa 0.4.8", +] + [[package]] name = "num-integer" version = "0.1.45" @@ -2081,15 +1725,14 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d41702bd167c2df5520b384281bc111a4b5efcf7fbc4c9c222c815b07e0a6a6a" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg", "num-bigint", "num-integer", "num-traits", - "serde", ] [[package]] @@ -2120,29 +1763,17 @@ dependencies = [ "libc", ] -[[package]] -name = "object" -version = "0.28.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" -dependencies = [ - "memchr", -] - [[package]] name = "once_cell" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "074864da206b4973b84eb91683020dbefd6a8c3f0f38e054d93954e891935e4e" [[package]] -name = "oneline-eyre" -version = "0.1.0" +name = "opaque-debug" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862f17a1e689c0ce8ca158ea48e776c5101c5d14fdfbed3e01c15f89604c3097" -dependencies = [ - "eyre", -] +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" [[package]] name = "opaque-debug" @@ -2157,82 +1788,88 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "opentelemetry" -version = "0.17.0" +name = "openssl-sys" +version = "0.9.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +checksum = 
"e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" dependencies = [ - "async-trait", - "crossbeam-channel 0.5.4", - "dashmap", - "fnv", - "futures-channel", - "futures-executor", - "futures-util", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", ] [[package]] -name = "opentelemetry-prometheus" -version = "0.10.0" +name = "os_str_bytes" +version = "6.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9328977e479cebe12ce0d3fcecdaea4721d234895a9440c5b5dfd113f0594ac6" -dependencies = [ - "opentelemetry", - "prometheus", - "protobuf", -] +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" [[package]] -name = "os_str_bytes" -version = "6.0.1" +name = "parity-scale-codec" +version = "3.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "029d8d0b2f198229de29dca79676f2738ff952edf3fde542eb8bf94d8c21b435" +checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" +dependencies = [ + "arrayvec 0.7.2", + "bitvec", + "byte-slice-cast", + "impl-trait-for-tuples", + "parity-scale-codec-derive", + "serde", +] [[package]] -name = "owo-colors" -version = "3.4.0" +name = "parity-scale-codec-derive" +version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" +checksum = "9299338969a3d2f491d65f140b00ddec470858402f888af98e8642fb5e8965cd" +dependencies = [ + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", +] [[package]] -name = "parking_lot" -version = "0.11.2" +name = "parity-util-mem" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "c32561d248d352148124f036cac253a644685a21dc9fea383eb4907d7bd35a8f" dependencies = [ - 
"instant", - "lock_api", - "parking_lot_core 0.8.5", + "cfg-if", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "primitive-types", + "winapi", ] [[package]] -name = "parking_lot" -version = "0.12.0" +name = "parity-util-mem-derive" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "lock_api", - "parking_lot_core 0.9.3", + "proc-macro2", + "syn", + "synstructure", ] [[package]] -name = "parking_lot_core" -version = "0.8.5" +name = "parity-wasm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" + +[[package]] +name = "parking_lot" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "cfg-if 1.0.0", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", + "lock_api", + "parking_lot_core", ] [[package]] @@ -2241,7 +1878,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -2250,9 +1887,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "9423e2b32f7a043629287a536f21951e8c6a82482d0acb1eeebfc90bc2225b22" [[package]] name = "pbkdf2" @@ -2263,6 +1900,15 @@ dependencies = [ "crypto-mac 0.8.0", ] +[[package]] +name = "pbkdf2" +version = "0.8.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" +dependencies = [ + "crypto-mac 0.11.1", +] + [[package]] name = "peg" version = "0.7.0" @@ -2296,20 +1942,30 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "petgraph" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" +dependencies = [ + "fixedbitset", + "indexmap", +] + [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -2328,17 +1984,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cabda3fb821068a9a4fab19a683eac3af12edf0f34b94a8be53c4972b8149d0" -dependencies = [ - "der", - "spki", - "zeroize", -] - [[package]] name = "pkg-config" version = "0.3.25" @@ -2351,6 +1996,49 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +[[package]] +name = "prettyplease" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ae720ee02011f439e0701db107ffe2916d83f718342d65d7f8bf7b8a5fee9" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "primitive-types" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28720988bff275df1f51b171e1b2a18c30d194c4d2b61defdacecd625a5d94a" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde", + "scale-info", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-crate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" +dependencies = [ + "once_cell", + "thiserror", + "toml", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -2377,36 +2065,43 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" dependencies = [ "unicode-ident", ] [[package]] -name = "prometheus" -version = "0.13.1" +name = "prost" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cface98dfa6d645ea4c789839f176e4b072265d085bfcc48eaa8d137f58d3c39" +checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" dependencies = [ - "cfg-if 1.0.0", - "fnv", - "lazy_static", - "memchr", - "parking_lot 0.12.0", - "protobuf", - "thiserror", + "bytes", 
+ "prost-derive", ] [[package]] -name = "prost" -version = "0.10.3" +name = "prost-build" +version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc03e116981ff7d8da8e5c220e374587b98d294af7ba7dd7fda761158f00086f" +checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" dependencies = [ "bytes", - "prost-derive", + "cfg-if", + "cmake", + "heck 0.4.0", + "itertools", + "lazy_static", + "log", + "multimap", + "petgraph", + "prost", + "prost-types", + "regex", + "tempfile", + "which", ] [[package]] @@ -2433,51 +2128,31 @@ dependencies = [ ] [[package]] -name = "protobuf" -version = "2.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf7e6d18738ecd0902d30d1ad232c9125985a3422929b16c65517b38adc14f96" - -[[package]] -name = "pulldown-cmark" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34f197a544b0c9ab3ae46c359a7ec9cbbb5c7bf97054266fecb7ead794a181d6" -dependencies = [ - "bitflags", - "memchr", - "unicase", -] - -[[package]] -name = "quanta" -version = "0.10.0" +name = "quote" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafd74c340a0a7e79415981ede3460df16b530fd071541901a57416eea950b17" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ - "crossbeam-utils 0.8.8", - "libc", - "mach", - "once_cell", - "raw-cpuid", - "wasi 0.10.2+wasi-snapshot-preview1", - "web-sys", - "winapi", + "proc-macro2", ] [[package]] -name = "quick-error" -version = "1.2.3" +name = "radium" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" [[package]] -name = "quote" -version = "1.0.18" +name = "rand" +version = "0.4.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" dependencies = [ - "proc-macro2", + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", ] [[package]] @@ -2491,6 +2166,7 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc", + "rand_pcg", ] [[package]] @@ -2524,6 +2200,21 @@ dependencies = [ "rand_core 0.6.3", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.5.1" @@ -2539,7 +2230,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -2552,12 +2243,12 @@ dependencies = [ ] [[package]] -name = "raw-cpuid" -version = "10.3.0" +name = "rand_pcg" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bc47119e3eeccc7e94c4a506901aea5e7b4944ecd0829cbebf4af04ceda12" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" dependencies = [ - "bitflags", + "rand_core 0.5.1", ] [[package]] @@ -2578,17 +2269,26 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ - "crossbeam-channel 0.5.4", + "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.8", + 
"crossbeam-utils", "num_cpus", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -2599,16 +2299,36 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "redox_syscall", "thiserror", ] +[[package]] +name = "ref-cast" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed13bcd201494ab44900a96490291651d200730904221832b9547d24a87d332b" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5234cd6063258a5e32903b53b1b6ac043a0541c8adc1f610f67b0326c7a578fa" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -2626,9 +2346,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = 
"a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "remove_dir_all" @@ -2640,27 +2360,10 @@ dependencies = [ ] [[package]] -name = "retry" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6" - -[[package]] -name = "rfc6979" -version = "0.1.0" +name = "ring" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96ef608575f6392792f9ecf7890c00086591d29a83910939d430753f7c050525" -dependencies = [ - "crypto-bigint", - "hmac 0.11.0", - "zeroize", -] - -[[package]] -name = "ring" -version = "0.16.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", @@ -2679,40 +2382,9 @@ checksum = "2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" dependencies = [ "block-buffer 0.9.0", "digest 0.9.0", - "opaque-debug", -] - -[[package]] -name = "rouille" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b2380c42510ef4a28b5f228a174c801e0dec590103e215e60812e2e2f34d05" -dependencies = [ - "base64", - "brotli", - "chrono", - "deflate", - "filetime", - "multipart", - "num_cpus", - "percent-encoding", - "rand 0.8.5", - "serde", - "serde_derive", - "serde_json", - "sha1", - "threadpool", - "time 0.3.9", - "tiny_http", - "url", + "opaque-debug 0.3.0", ] -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -2720,17 +2392,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "rustls" -version = "0.19.1" +name = "rustc-hex" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustls" @@ -2740,54 +2405,15 @@ checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] -[[package]] -name = "rustls-native-certs" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" -dependencies = [ - "openssl-probe", - "rustls 0.19.1", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" -dependencies = [ - "base64", -] - -[[package]] -name = "rustversion" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" - [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = 
"4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "safe-proc-macro2" @@ -2836,12 +2462,6 @@ dependencies = [ "safe-regex-compiler", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "same-file" version = "1.0.6" @@ -2852,22 +2472,28 @@ dependencies = [ ] [[package]] -name = "schannel" -version = "0.1.20" +name = "scale-info" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" dependencies = [ - "lazy_static", - "windows-sys", + "bitvec", + "cfg-if", + "derive_more", + "parity-scale-codec", + "scale-info-derive", ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.5" +name = "scale-info-derive" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f74fd1204073fa02d5d5d68bec8021be4c38690b61264b2fdb48083d0e7d7" +checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" dependencies = [ - "parking_lot 0.11.2", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -2895,20 +2521,28 @@ dependencies = [ ] [[package]] -name = "scopeguard" -version = "1.1.0" +name = "schnorrkel" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" +dependencies = [ + "arrayref", + "arrayvec 0.5.2", + "curve25519-dalek 2.1.3", + "getrandom 0.1.16", + "merlin", + "rand 0.7.3", + "rand_core 0.5.1", + "sha2 0.8.2", + "subtle", + "zeroize", +] [[package]] -name = "sct" -version = "0.6.1" +name = "scopeguard" +version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" @@ -2920,34 +2554,20 @@ dependencies = [ "untrusted", ] -[[package]] -name = "sec1" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08da66b8b0965a5555b6bd6639e68ccba85e1e2506f5fbb089e93f8a04e1a2d1" -dependencies = [ - "der", - "generic-array", - "pkcs8", - "subtle", - "zeroize", -] - [[package]] name = "secp256k1" -version = "0.22.1" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26947345339603ae8395f68e2f3d85a6b0a8ddfe6315818e80b8504415099db0" +checksum = "9c42e6f1735c5f00f51e43e28d6634141f2bcad10931b2609ddd74a86d751260" dependencies = [ "secp256k1-sys", - "serde", ] [[package]] name = "secp256k1-sys" -version = "0.5.2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "152e20a0fd0519390fc43ab404663af8a0b794273d2a91d60ad4a39f13ffe110" +checksum = "957da2573cde917463ece3570eab4a0b3f19de6f1646cde62e6fd3868f566036" dependencies = [ "cc", ] @@ -2958,75 +2578,32 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ - "serde", "zeroize", ] -[[package]] -name = "security-framework" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "semver" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd" -dependencies = [ - "serde", -] - [[package]] name = "serde" -version = "1.0.137" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "0f747710de3dcd43b88c9168773254e809d8ddbdf9653b84e2554ab219f17860" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212e73464ebcde48d723aa02eb270ba62eff38a9b732df31f33f1b4e145f3a54" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_cbor" -version = "0.11.2" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" dependencies = [ - "half", "serde", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.144" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "94ed3a816fb1d101812f83e789f888322c34e291f894f19590dc310963e87a00" dependencies = [ "proc-macro2", "quote", @@ -3046,20 +2623,20 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ - "itoa", + "itoa 1.0.3", "ryu", "serde", ] [[package]] 
name = "serde_repr" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ad84e47328a31223de7fed7a4f5087f2d6ddfe586cf3ca25b7a165bc0a5aed" +checksum = "1fe39d9fbb0ebf5eb2c7cb7e2a47e4f462fad1379f1166b8ae49ad9eae89a7ca" dependencies = [ "proc-macro2", "quote", @@ -3067,203 +2644,424 @@ dependencies = [ ] [[package]] -name = "serde_yaml" -version = "0.8.24" +name = "sha2" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" dependencies = [ - "indexmap", - "ryu", - "serde", - "yaml-rust", + "block-buffer 0.7.3", + "digest 0.8.1", + "fake-simd", + "opaque-debug 0.2.3", ] [[package]] -name = "serial_test" -version = "0.7.0" +name = "sha2" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19dbfb999a147cedbfe82f042eb9555f5b0fa4ef95ee4570b74349103d9c9f4" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "lazy_static", - "log", - "parking_lot 0.12.0", - "serial_test_derive", + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] -name = "serial_test_derive" -version = "0.7.0" +name = "sha2" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb9e2050b2be1d681f8f1c1a528bcfe4e00afa2d8995f713974f5333288659f2" +checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "rustversion", - "syn", + "cfg-if", + "cpufeatures", + "digest 0.10.3", ] [[package]] -name = "sha-1" -version = "0.9.8" +name = "sha3" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", "digest 0.9.0", - "opaque-debug", + "keccak", + "opaque-debug 0.3.0", ] [[package]] -name = "sha-1" -version = "0.10.0" +name = "sha3" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "0a31480366ec990f395a61b7c08122d99bd40544fdb5abcfc1b06bb29994312c" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", "digest 0.10.3", + "keccak", ] [[package]] -name = "sha1" -version = "0.6.1" +name = "sharded-slab" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ - "sha1_smol", + "lazy_static", ] [[package]] -name = "sha1_smol" -version = "1.0.0" +name = "signal-hook-registry" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +dependencies = [ + "libc", +] [[package]] -name = "sha2" -version = "0.9.9" +name = "signature" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" +checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" + +[[package]] +name = "slab" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.9.0", - 
"opaque-debug", + "autocfg", ] [[package]] -name = "sha2" -version = "0.10.2" +name = "smallvec" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1" + +[[package]] +name = "socket2" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10c98bba371b9b22a71a9414e420f92ddeb2369239af08200816169d5e2dd7aa" dependencies = [ - "cfg-if 1.0.0", - "cpufeatures", - "digest 0.10.3", + "libc", + "winapi", ] [[package]] -name = "sha3" -version = "0.9.1" +name = "sp-core" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" +checksum = "77963e2aa8fadb589118c3aede2e78b6c4bcf1c01d588fbf33e915b390825fbd" dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug", + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", + "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", + "schnorrkel", + "secp256k1", + "secrecy", + "serde", + "sp-core-hashing 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ss58-registry", + "substrate-bip39", + "thiserror", + 
"tiny-bip39", + "wasmi", + "zeroize", ] [[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +name = "sp-core" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" dependencies = [ + "base58", + "bitflags", + "blake2-rfc", + "byteorder", + "dyn-clonable", + "ed25519-dalek", + "futures", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde", "lazy_static", + "libsecp256k1", + "log", + "merlin", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot", + "primitive-types", + "rand 0.7.3", + "regex", + "scale-info", + "schnorrkel", + "secp256k1", + "secrecy", + "serde", + "sp-core-hashing 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "ss58-registry", + "substrate-bip39", + "thiserror", + "tiny-bip39", + "wasmi", + "zeroize", ] [[package]] -name = "signal-hook" -version = "0.3.14" +name = "sp-core-hashing" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a253b5e89e2698464fc26b545c9edceb338e18a89effeeecfea192c3025be29d" +checksum = "ec864a6a67249f0c8dd3d5acab43623a61677e85ff4f2f9b04b802d2fe780e83" dependencies = [ - "libc", - "signal-hook-registry", + "blake2-rfc", + "byteorder", + "sha2 0.9.9", + "sp-std 4.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak", + "twox-hash", ] [[package]] -name = "signal-hook-registry" -version = "1.4.0" +name = "sp-core-hashing" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "blake2", + "byteorder", + "digest 0.10.3", + "sha2 0.10.2", + "sha3 0.10.2", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "twox-hash", +] + +[[package]] +name = "sp-debug-derive" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" +checksum = "d676664972e22a0796176e81e7bec41df461d1edf52090955cdab55f2c956ff2" dependencies = [ - "libc", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "signature" -version = "1.4.0" +name = "sp-debug-derive" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02658e48d89f2bec991f9a78e69cfa4c316f8d6a6c4ec12fae1aeb263d486788" +checksum = "0fcfd91f92a2a59224230a77c4a5d6f51709620c0aab4e51f108ccece6adc56f" dependencies = [ - "digest 0.9.0", - "rand_core 0.6.3", + "environmental", + "parity-scale-codec", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "simple-error" -version = "0.2.3" +name = "sp-externalities" +version = "0.12.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "environmental", + "parity-scale-codec", + "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", +] + +[[package]] +name = "sp-runtime-interface" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc47a29ce97772ca5c927f75bac34866b16d64e07f330c3248e2d7226623901b" +checksum = "158bf0305c75a50fc0e334b889568f519a126e32b87900c3f4251202dece7b4b" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-runtime-interface-proc-macro 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-storage 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-tracing 5.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-wasm-interface 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "static_assertions", +] [[package]] -name = "skeptic" -version = "0.13.7" +name = "sp-runtime-interface" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "primitive-types", + "sp-externalities 0.12.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-runtime-interface-proc-macro 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-storage 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-tracing 5.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-wasm-interface 6.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "static_assertions", +] + 
+[[package]] +name = "sp-runtime-interface-proc-macro" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8" +checksum = "22ecb916b9664ed9f90abef0ff5a3e61454c1efea5861b2997e03f39b59b955f" dependencies = [ - "bytecount", - "cargo_metadata", - "error-chain", - "glob", - "pulldown-cmark", - "tempfile", - "walkdir", + "Inflector", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "slab" -version = "0.4.6" +name = "sp-runtime-interface-proc-macro" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "Inflector", + "proc-macro-crate 1.2.1", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-std" +version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" +checksum = "14804d6069ee7a388240b665f17908d98386ffb0b5d39f89a4099fc7a2a4c03f" [[package]] -name = "smallvec" -version = "1.8.0" +name = "sp-std" +version = "4.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" + +[[package]] +name = "sp-storage" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "5dab53af846068e3e0716d3ccc70ea0db44035c79b2ed5821aaa6635039efa37" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] -name = "socket2" -version = "0.4.4" +name = "sp-storage" +version = "6.0.0" +source = 
"git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "impl-serde", + "parity-scale-codec", + "ref-cast", + "serde", + "sp-debug-derive 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", +] + +[[package]] +name = "sp-tracing" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "69a67e555d171c4238bd223393cda747dd20ec7d4f5fe5c042c056cb7fde9eda" dependencies = [ - "libc", - "winapi", + "parity-scale-codec", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", ] [[package]] -name = "sp-std" -version = "3.0.0" +name = "sp-tracing" +version = "5.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "parity-scale-codec", + "sp-std 4.0.0 (git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "tracing", + "tracing-core", + "tracing-subscriber 0.2.25", +] + +[[package]] +name = "sp-wasm-interface" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" +checksum = "10d88debe690c2b24eaa9536a150334fcef2ae184c21a0e5b3e80135407a7d52" +dependencies = [ + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasmi", +] + +[[package]] +name = "sp-wasm-interface" +version = "6.0.0" +source = "git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24#814752f60ab8cce7e2ece3ce0c1b10799b4eab28" +dependencies = [ + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "sp-std 4.0.0 
(git+https://github.com/paritytech/substrate?branch=polkadot-v0.9.24)", + "wasmi", +] [[package]] name = "spin" @@ -3272,13 +3070,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "spki" -version = "0.5.4" +name = "ss58-registry" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d01ac02a6ccf3e07db148d2be087da624fea0221a16152ed01f0496a6b0a27" +checksum = "1c8a1e645fa0bd3e81a90e592a677f7ada3182ac338c4a71cd9ec0ba911f6abb" dependencies = [ - "base64ct", - "der", + "Inflector", + "num-format", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", ] [[package]] @@ -3293,6 +3096,19 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "substrate-bip39" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" +dependencies = [ + "hmac 0.11.0", + "pbkdf2 0.8.0", + "schnorrkel", + "sha2 0.9.9", + "zeroize", +] + [[package]] name = "subtle" version = "2.4.1" @@ -3308,11 +3124,17 @@ dependencies = [ "zeroize", ] +[[package]] +name = "subtle-ng" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "734676eb262c623cec13c3155096e08d1f8f29adce39ba17948b18dad1e54142" + [[package]] name = "syn" -version = "1.0.96" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf" +checksum = "58dbef6ec655055e20b86b15a8cc6d439cca19b667537ac6a1369572d151ab13" dependencies = [ "proc-macro2", "quote", @@ -3338,10 +3160,20 @@ dependencies = [ ] [[package]] -name = "tagptr" -version = "0.2.0" +name = "tap" +version = "1.0.1" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempdir" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" +checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" +dependencies = [ + "rand 0.4.6", + "remove_dir_all", +] [[package]] name = "tempfile" @@ -3349,7 +3181,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "libc", "redox_syscall", @@ -3359,40 +3191,36 @@ dependencies = [ [[package]] name = "tendermint" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "async-trait", "bytes", "ed25519", - "ed25519-dalek", + "ed25519-consensus", "flex-error", "futures", - "k256", "num-traits", "once_cell", "prost", "prost-types", - "ripemd160", "serde", "serde_bytes", "serde_json", "serde_repr", - "sha2 0.9.9", + "sha2 0.10.2", "signature", "subtle", "subtle-encoding", "tendermint-proto", - "time 0.3.9", + "time", "zeroize", ] [[package]] name = "tendermint-config" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6c56ee93f4e9b7e7daba86d171f44572e91b741084384d0ae00df7991873dfd" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "flex-error", "serde", @@ -3402,46 +3230,10 
@@ dependencies = [ "url", ] -[[package]] -name = "tendermint-light-client" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e30a20da1169069a8629b9535bcee669be8b07480c696b5eb2f7d9cd4e4c431" -dependencies = [ - "contracts", - "crossbeam-channel 0.4.4", - "derive_more", - "flex-error", - "futures", - "serde", - "serde_cbor", - "serde_derive", - "static_assertions", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-rpc", - "time 0.3.9", - "tokio", -] - -[[package]] -name = "tendermint-light-client-verifier" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" -dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time 0.3.9", -] - [[package]] name = "tendermint-proto" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ "bytes", "flex-error", @@ -3452,59 +3244,34 @@ dependencies = [ "serde", "serde_bytes", "subtle-encoding", - "time 0.3.9", + "time", ] [[package]] name = "tendermint-rpc" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a13e63f57ee05a1e927887191c76d1b139de9fa40c180b9f8727ee44377242a6" +version = "0.24.0-pre.2" +source = "git+https://github.com/composableFi/tendermint-rs?rev=5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8#5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" dependencies = [ - "async-trait", - "async-tungstenite", "bytes", "flex-error", - "futures", - "getrandom 0.2.6", - "http", - "hyper", - "hyper-proxy", - "hyper-rustls", + "getrandom 0.2.7", "peg", "pin-project", "serde", "serde_bytes", "serde_json", + 
"subtle", "subtle-encoding", "tendermint", "tendermint-config", "tendermint-proto", "thiserror", - "time 0.3.9", - "tokio", - "tracing", + "time", "url", - "uuid 0.8.2", + "uuid", "walkdir", ] -[[package]] -name = "tendermint-testgen" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "442ede2d01e61466e515fd7f1d0aac7c3c86b3066535479caa86a43afb5e2e17" -dependencies = [ - "ed25519-dalek", - "gumdrop", - "serde", - "serde_json", - "simple-error", - "tempfile", - "tendermint", - "time 0.3.9", -] - [[package]] name = "termcolor" version = "1.1.3" @@ -3514,21 +3281,11 @@ dependencies = [ "winapi-util", ] -[[package]] -name = "terminal_size" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "test-log" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4235dbf7ea878b3ef12dea20a59c134b405a66aafc4fc2c7b9935916e289e735" +checksum = "38f0c854faeb68a048f0f2dc410c5ddae3bf83854ef0e4977d58306a5edef50e" dependencies = [ "proc-macro2", "quote", @@ -3543,18 +3300,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "f5f6586b7f764adc0231f4c79be7b920e766bb2f3e51b3661cdb263828f19994" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "12bafc5b54507e0149cdf1b145a5d80ab80a90bcd9275df43d4fff68460f6c21" dependencies = [ "proc-macro2", 
"quote", @@ -3570,30 +3327,11 @@ dependencies = [ "once_cell", ] -[[package]] -name = "threadpool" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" -dependencies = [ - "num_cpus", -] - -[[package]] -name = "time" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "time" -version = "0.3.9" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd" +checksum = "3c3f9a28b618c3a6b9251b6908e9c99e04b9e5c02e6581ccbb67d59c34ef7f9b" dependencies = [ "libc", "num_threads", @@ -3615,7 +3353,7 @@ dependencies = [ "anyhow", "hmac 0.8.1", "once_cell", - "pbkdf2", + "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", "sha2 0.9.9", @@ -3634,19 +3372,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tiny_http" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce51b50006056f590c9b7c3808c3bd70f0d1101666629713866c227d6e58d39" -dependencies = [ - "ascii", - "chrono", - "chunked_transfer", - "log", - "url", -] - [[package]] name = "tinyvec" version = "1.6.0" @@ -3664,17 +3389,18 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.2" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "7a8325f63a7d4774dd041e363b2409ed1c5cbbd0f867795e661df066b2b0a581" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", "once_cell", - "parking_lot 0.12.0", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", @@ -3694,42 +3420,20 @@ 
dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", "syn", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - -[[package]] -name = "tokio-rustls" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" -dependencies = [ - "rustls 0.20.6", - "tokio", - "webpki 0.22.0", -] - [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite", @@ -3738,9 +3442,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", @@ -3781,10 +3485,7 @@ dependencies = [ "pin-project", "prost", "prost-derive", - "rustls-native-certs 0.6.2", - "rustls-pemfile", "tokio", - "tokio-rustls 0.23.4", "tokio-stream", "tokio-util", "tower", @@ -3794,11 +3495,24 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic-build" +version = "0.7.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9263bf4c9bfaae7317c1c2faf7f18491d2fe476f70c414b73bf5d445b00ffa1" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "quote", + "syn", +] + [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", @@ -3816,9 +3530,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d342c6d58709c0a6d48d48dabbb62d4ef955cf5f0f3bbfd845838e7ae88dbae" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" dependencies = [ "bitflags", "bytes", @@ -3841,17 +3555,17 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "2fce9567bd60a67d08a16488756721ba392f24f29006402881e43b19aac64307" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -3860,9 +3574,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = 
"11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2" dependencies = [ "proc-macro2", "quote", @@ -3871,24 +3585,14 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "5aeea4303076558a00714b823f9ad67d58a3bbda1df83d8827d21193156e22f7" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] -[[package]] -name = "tracing-error" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" -dependencies = [ - "tracing", - "tracing-subscriber", -] - [[package]] name = "tracing-futures" version = "0.2.5" @@ -3922,13 +3626,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term", + "chrono", "lazy_static", - "matchers", + "matchers 0.0.1", "regex", "serde", "serde_json", @@ -3942,12 +3647,24 @@ dependencies = [ ] [[package]] -name = "triomphe" -version = "0.1.5" +name = "tracing-subscriber" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c45e322b26410d7260e00f64234810c2f17d7ece356182af4df8f7ff07890f09" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" dependencies = [ - "memoffset 0.6.5", + "ansi_term", + "matchers 0.1.0", + "once_cell", + "regex", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] @@ -3957,31 +3674,15 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] -name = "tungstenite" -version = "0.12.0" +name = "twox-hash" +version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ada8297e8d70872fa9a551d93250a9f407beb9f37ef86494eb20012a2ff7c24" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "base64", - "byteorder", - "bytes", - "http", - "httparse", - "input_buffer", - "log", + "cfg-if", + "digest 0.10.3", "rand 0.8.5", - "sha-1 0.9.8", - "url", - "utf-8", -] - -[[package]] -name = "twoway" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" -dependencies = [ - "memchr", + "static_assertions", ] [[package]] @@ -4002,15 +3703,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] - [[package]] name = "unicode-bidi" version = "0.3.8" @@ -4019,24 +3711,24 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "c4f5b37a154999a8f3f98cc23a628d850e154479cd94decf3414696e12e31aaf" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6" dependencies = [ "tinyvec", ] 
[[package]] -name = "unicode-width" -version = "0.1.9" +name = "unicode-segmentation" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "7e8820f5d777f6224dc4be3632222971ac30164d4a258d595640799554ebfd99" [[package]] name = "unicode-xid" @@ -4052,19 +3744,19 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "ureq" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9399fa2f927a3d327187cbd201480cee55bee6ac5d3c77dd27f0c6814cff16d5" +checksum = "b97acb4c28a254fd7a4aeec976c46a7fa404eac4d7c134b30c75144846d7cb8f" dependencies = [ "base64", "chunked_transfer", "flate2", "log", "once_cell", - "rustls 0.20.6", + "rustls", "url", - "webpki 0.22.0", - "webpki-roots 0.22.3", + "webpki", + "webpki-roots", ] [[package]] @@ -4079,29 +3771,11 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "uuid" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" -dependencies = [ - "getrandom 0.2.6", -] - -[[package]] -name = "uuid" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6d5d669b51467dcf7b2f1a796ce0f955f05f01cafda6c19d6e95f730df29238" -dependencies = [ - "getrandom 0.2.6", -] [[package]] name = "valuable" @@ -4110,19 +3784,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] -name = "version_check" -version = "0.9.4" +name = "vcpkg" +version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] -name = "wait-timeout" -version = "0.2.0" +name = "version_check" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" -dependencies = [ - "libc", -] +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" @@ -4151,12 +3822,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -4165,23 +3830,23 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" +checksum = "fc7652e3f6c4706c8d9cd54832c4a4ccb9b5336e2c3bd154d5cccfbf1c1f5f7d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" +checksum = "662cd44805586bd52971b9586b1df85cdbbd9112e4ef4d8f41559c334dc6ac3f" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -4190,9 +3855,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-macro" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" +checksum = "b260f13d3012071dfb1512849c033b1925038373aea48ced3012c09df952c602" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4200,9 +3865,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" +checksum = "5be8e654bdd9b79216c2929ab90721aa82faf65c48cdf08bdc4e7f51357b80da" dependencies = [ "proc-macro2", "quote", @@ -4213,28 +3878,42 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.80" +version = "0.2.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" +checksum = "6598dd0bd3c7d51095ff6531a5b23e02acdc81804e30d8f07afb77b7215a140a" [[package]] -name = "web-sys" -version = "0.3.57" +name = "wasmi" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" +checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" dependencies = [ - "js-sys", - "wasm-bindgen", + "downcast-rs", + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", ] [[package]] -name = "webpki" -version = "0.21.4" +name = "wasmi-validation" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" dependencies = [ - "ring", - "untrusted", + "parity-wasm", +] + +[[package]] +name = "web-sys" +version = 
"0.3.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed055ab27f941423197eb86b2035720b1a3ce40504df082cac2ecc6ed73335a1" +dependencies = [ + "js-sys", + "wasm-bindgen", ] [[package]] @@ -4249,20 +3928,22 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.21.1" +version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aabe153544e473b775453675851ecc86863d2a81d786d741f6b76778f2a48940" +checksum = "f1c760f0d366a6c24a02ed7816e23e691f5d92291f94d15e836006fd11b04daf" dependencies = [ - "webpki 0.21.4", + "webpki", ] [[package]] -name = "webpki-roots" -version = "0.22.3" +name = "which" +version = "4.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d8de8415c823c8abd270ad483c6feeac771fad964890779f9a8cb24fbbc1bf" +checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" dependencies = [ - "webpki 0.22.0", + "either", + "lazy_static", + "libc", ] [[package]] @@ -4340,19 +4021,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "wyz" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "30b31594f29d27036c383b53b59ed3476874d518f0efb151b27a4c275141390e" dependencies = [ - "linked-hash-map", + "tap", ] [[package]] name = "zeroize" -version = "1.5.5" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] @@ -4368,17 +4049,3 @@ dependencies = [ "syn", "synstructure", ] - -[[package]] -name = "zip" -version = "0.5.13" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" -dependencies = [ - "byteorder", - "bzip2", - "crc32fast", - "flate2", - "thiserror", - "time 0.1.43", -] diff --git a/Cargo.toml b/Cargo.toml index eaf4e09dfc..3eda14e8e9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,24 +4,7 @@ resolver = "2" members = [ "modules", - "relayer", - "relayer-cli", - "relayer-rest", - "telemetry", "proto", - "tools/integration-test", - "tools/test-framework", -] - -exclude = [ - "ci/no-std-check", - "proto-compiler" -] - -# [patch.crates-io] -# tendermint = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-rpc = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-proto = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-testgen = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } + "proto-compiler", + "derive", +] \ No newline at end of file diff --git a/README.md b/README.md index 8b90bbc69a..e8813f4d4a 100644 --- a/README.md +++ b/README.md @@ -1,124 +1,3 @@ # ibc-rs -[![Cosmos ecosystem][cosmos-shield]][cosmos-link] - -[![Build Status][build-image]][build-link] -[![End to End testing][e2e-image]][e2e-link] -[![Apache 2.0 Licensed][license-image]][license-link] -![Rust Stable][rustc-image] -![Rust 1.60+][rustc-version] - -Rust implementation of the Inter-Blockchain Communication (IBC) protocol. - -This project comprises primarily four crates: - -- The [`ibc`][ibc-crate-link] crate defines the main data structures and - on-chain logic for the IBC protocol. 
-- The [`ibc-relayer`][relayer-crate-link] crate provides an implementation - of an IBC relayer, as a _library_. -- The [`ibc-relayer-cli`][relayer-cli-crate-link] crate is a CLI (a wrapper - over the `ibc-relayer` library), comprising the - [`hermes`](https://hermes.informal.systems) binary. -- The [`ibc-proto`][ibc-proto-crate-link] crate is a library with Rust types generated from .proto definitions - necessary for interacting with [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/tree/master/proto/cosmos) - and its [IBC structs](https://github.com/cosmos/ibc-go/tree/main/proto/ibc). -- The [`ibc-telemetry`][ibc-telemetry-crate-link] crate is a library for use in the `hermes` CLI, - for gathering telemetry data and exposing that in a Prometheus endpoint. -- The [`ibc-test-framework`][ibc-test-framework-crate-link] crate provides the infrastructure and framework for writing end-to-end (E2E) tests that include the spawning of the relayer together with Cosmos full nodes. - -See the table below for more details. - -Includes [TLA+ specifications](docs/spec). 
- -| Crate name | Type | Version | Docs | -|:-------------:|:------:|:-------------:|:-----:| -| [ibc](./modules) | lib | [![IBC Crate][ibc-crate-image]][ibc-crate-link] | [![IBC Docs][ibc-docs-image]][ibc-docs-link] | -| [ibc-relayer](./relayer) | lib | [![IBC Relayer Crate][relayer-crate-image]][relayer-crate-link] | [![IBC Relayer Docs][relayer-docs-image]][relayer-docs-link] | -| [ibc-relayer-cli](./relayer-cli) | bin: [hermes](relayer-cli/) | [![IBC Relayer CLI Crate][relayer-cli-crate-image]][relayer-cli-crate-link] | [![IBC Relayer CLI Docs][relayer-cli-docs-image]][relayer-cli-docs-link] | -| [ibc-relayer-rest](./relayer-rest) | lib | [![IBC Relayer REST Crate][relayer-rest-crate-image]][relayer-rest-crate-link] | [![IBC Relayer REST Docs][relayer-rest-docs-image]][relayer-rest-docs-link] | -| [ibc-proto](./proto) | lib | [![IBC Proto Crate][ibc-proto-crate-image]][ibc-proto-crate-link] | [![IBC Proto Docs][ibc-proto-docs-image]][ibc-proto-docs-link] | -| [ibc-telemetry](./telemetry) | lib | [![IBC Telemetry Crate][ibc-telemetry-crate-image]][ibc-telemetry-crate-link] | [![IBC Telemetry Docs][ibc-telemetry-docs-image]][ibc-telemetry-docs-link] | -| [ibc-test-framework](./tools/test-framework) | lib | [![IBC Test Framework Crate][ibc-test-framework-crate-image]][ibc-test-framework-crate-link] | [![IBC Test Framework Docs][ibc-test-framework-docs-image]][ibc-test-framework-docs-link] | - - -## Requirements - -The crates in this project require the latest stable version of Rust: `1.60.0`. - -## Hermes Guide - -The relayer CLI binary, called `hermes`, has a comprehensive guide at -[hermes.informal.systems](http://hermes.informal.systems). - -## Contributing - -IBC is specified in English in the [cosmos/ibc repo](https://github.com/cosmos/ibc). Any -protocol changes or clarifications should be contributed there. - -This repo contains the TLA+ specification and Rust implementation for the IBC -modules and relayer. 
If you're interested in contributing, please comment on an issue or open a new one! - -See also [CONTRIBUTING.md](./CONTRIBUTING.md). - -## Versioning - -We follow [Semantic Versioning](https://semver.org/), though APIs are still -under active development. - -## Resources - -- [IBC Website](https://cosmos.network/ibc) -- [IBC Specification](https://github.com/cosmos/ibc) -- [IBC Modules in Go](https://github.com/cosmos/ibc-go) -- [IBC Relayer in Typescript](https://github.com/confio/ts-relayer) -- [IBC Relayer in Go](https://github.com/cosmos/relayer) - -## License - -Copyright © 2021 Informal Systems Inc. and ibc-rs authors. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use the files in this repository except in compliance with the License. You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- -[ibc-crate-image]: https://img.shields.io/crates/v/ibc.svg -[ibc-crate-link]: https://crates.io/crates/ibc -[ibc-docs-image]: https://docs.rs/ibc/badge.svg -[ibc-docs-link]: https://docs.rs/ibc/ -[relayer-crate-image]: https://img.shields.io/crates/v/ibc-relayer.svg -[relayer-crate-link]: https://crates.io/crates/ibc-relayer -[relayer-docs-image]: https://docs.rs/ibc-relayer/badge.svg -[relayer-docs-link]: https://docs.rs/ibc-relayer/ -[relayer-cli-crate-image]: https://img.shields.io/crates/v/ibc-relayer-cli.svg -[relayer-cli-crate-link]: https://crates.io/crates/ibc-relayer-cli -[relayer-cli-docs-image]: https://docs.rs/ibc-relayer-cli/badge.svg -[relayer-cli-docs-link]: https://docs.rs/ibc-relayer-cli/ -[relayer-rest-crate-image]: https://img.shields.io/crates/v/ibc-relayer-rest.svg -[relayer-rest-crate-link]: https://crates.io/crates/ibc-relayer-rest -[relayer-rest-docs-image]: https://docs.rs/ibc-relayer-rest/badge.svg -[relayer-rest-docs-link]: https://docs.rs/ibc-relayer-rest/ -[ibc-proto-crate-image]: https://img.shields.io/crates/v/ibc-proto.svg -[ibc-proto-crate-link]: https://crates.io/crates/ibc-proto -[ibc-proto-docs-image]: https://docs.rs/ibc-proto/badge.svg -[ibc-proto-docs-link]: https://docs.rs/ibc-proto/ -[ibc-telemetry-crate-image]: https://img.shields.io/crates/v/ibc-telemetry.svg -[ibc-telemetry-crate-link]: https://crates.io/crates/ibc-telemetry -[ibc-telemetry-docs-image]: https://docs.rs/ibc-telemetry/badge.svg -[ibc-telemetry-docs-link]: https://docs.rs/ibc-telemetry/ -[ibc-test-framework-crate-image]: https://img.shields.io/crates/v/ibc-test-framework.svg -[ibc-test-framework-crate-link]: https://crates.io/crates/ibc-test-framework -[ibc-test-framework-docs-image]: https://docs.rs/ibc-test-framework/badge.svg -[ibc-test-framework-docs-link]: https://docs.rs/ibc-test-framework/ - -[build-image]: https://github.com/informalsystems/ibc-rs/workflows/Rust/badge.svg -[build-link]: 
https://github.com/informalsystems/ibc-rs/actions?query=workflow%3ARust -[e2e-image]: https://github.com/informalsystems/ibc-rs/workflows/End%20to%20End%20testing/badge.svg -[e2e-link]: https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22 -[license-image]: https://img.shields.io/badge/license-Apache_2.0-blue.svg -[license-link]: https://github.com/informalsystems/ibc-rs/blob/master/LICENSE -[rustc-image]: https://img.shields.io/badge/rustc-stable-blue.svg -[rustc-version]: https://img.shields.io/badge/rustc-1.60+-blue.svg -[cosmos-shield]: https://img.shields.io/static/v1?label=&labelColor=1B1E36&color=1B1E36&message=cosmos%20ecosystem&style=for-the-badge&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4KPCEtLSBHZW5lcmF0b3I6IEFkb2JlIElsbHVzdHJhdG9yIDI0LjMuMCwgU1ZHIEV4cG9ydCBQbHVnLUluIC4gU1ZHIFZlcnNpb246IDYuMDAgQnVpbGQgMCkgIC0tPgo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4IgoJIHZpZXdCb3g9IjAgMCAyNTAwIDI1MDAiIHN0eWxlPSJlbmFibGUtYmFja2dyb3VuZDpuZXcgMCAwIDI1MDAgMjUwMDsiIHhtbDpzcGFjZT0icHJlc2VydmUiPgo8c3R5bGUgdHlwZT0idGV4dC9jc3MiPgoJLnN0MHtmaWxsOiM2RjczOTA7fQoJLnN0MXtmaWxsOiNCN0I5Qzg7fQo8L3N0eWxlPgo8cGF0aCBjbGFzcz0ic3QwIiBkPSJNMTI1Mi42LDE1OS41Yy0xMzQuOSwwLTI0NC4zLDQ4OS40LTI0NC4zLDEwOTMuMXMxMDkuNCwxMDkzLjEsMjQ0LjMsMTA5My4xczI0NC4zLTQ4OS40LDI0NC4zLTEwOTMuMQoJUzEzODcuNSwxNTkuNSwxMjUyLjYsMTU5LjV6IE0xMjY5LjQsMjI4NGMtMTUuNCwyMC42LTMwLjksNS4xLTMwLjksNS4xYy02Mi4xLTcyLTkzLjItMjA1LjgtOTMuMi0yMDUuOAoJYy0xMDguNy0zNDkuOC04Mi44LTExMDAuOC04Mi44LTExMDAuOGM1MS4xLTU5Ni4yLDE0NC03MzcuMSwxNzUuNi03NjguNGM2LjctNi42LDE3LjEtNy40LDI0LjctMmM0NS45LDMyLjUsODQuNCwxNjguNSw4NC40LDE2OC41CgljMTEzLjYsNDIxLjgsMTAzLjMsODE3LjksMTAzLjMsODE3LjljMTAuMywzNDQuNy01Ni45LDczMC41LTU2LjksNzMwLjVDMTM0MS45LDIyMjIuMiwxMjY5LjQsMjI4NCwxMjY5LjQsMjI4NHoiLz4KPHBhdGggY2xhc3M9InN0MCIgZD0iTTIyMDAuNyw3MDguNmMtNjcuMi0xMTcuMS01NDYuMSwzMS42LT
EwNzAsMzMycy04OTMuNSw2MzguOS04MjYuMyw3NTUuOXM1NDYuMS0zMS42LDEwNzAtMzMyCglTMjI2Ny44LDgyNS42LDIyMDAuNyw3MDguNkwyMjAwLjcsNzA4LjZ6IE0zNjYuNCwxNzgwLjRjLTI1LjctMy4yLTE5LjktMjQuNC0xOS45LTI0LjRjMzEuNi04OS43LDEzMi0xODMuMiwxMzItMTgzLjIKCWMyNDkuNC0yNjguNCw5MTMuOC02MTkuNyw5MTMuOC02MTkuN2M1NDIuNS0yNTIuNCw3MTEuMS0yNDEuOCw3NTMuOC0yMzBjOS4xLDIuNSwxNSwxMS4yLDE0LDIwLjZjLTUuMSw1Ni0xMDQuMiwxNTctMTA0LjIsMTU3CgljLTMwOS4xLDMwOC42LTY1Ny44LDQ5Ni44LTY1Ny44LDQ5Ni44Yy0yOTMuOCwxODAuNS02NjEuOSwzMTQuMS02NjEuOSwzMTQuMUM0NTYsMTgxMi42LDM2Ni40LDE3ODAuNCwzNjYuNCwxNzgwLjRMMzY2LjQsMTc4MC40CglMMzY2LjQsMTc4MC40eiIvPgo8cGF0aCBjbGFzcz0ic3QwIiBkPSJNMjE5OC40LDE4MDAuNGM2Ny43LTExNi44LTMwMC45LTQ1Ni44LTgyMy03NTkuNVMzNzQuNCw1ODcuOCwzMDYuOCw3MDQuN3MzMDAuOSw0NTYuOCw4MjMuMyw3NTkuNQoJUzIxMzAuNywxOTE3LjQsMjE5OC40LDE4MDAuNHogTTM1MS42LDc0OS44Yy0xMC0yMy43LDExLjEtMjkuNCwxMS4xLTI5LjRjOTMuNS0xNy42LDIyNC43LDIyLjYsMjI0LjcsMjIuNgoJYzM1Ny4yLDgxLjMsOTk0LDQ4MC4yLDk5NCw0ODAuMmM0OTAuMywzNDMuMSw1NjUuNSw0OTQuMiw1NzYuOCw1MzcuMWMyLjQsOS4xLTIuMiwxOC42LTEwLjcsMjIuNGMtNTEuMSwyMy40LTE4OC4xLTExLjUtMTg4LjEtMTEuNQoJYy00MjIuMS0xMTMuMi03NTkuNi0zMjAuNS03NTkuNi0zMjAuNWMtMzAzLjMtMTYzLjYtNjAzLjItNDE1LjMtNjAzLjItNDE1LjNjLTIyNy45LTE5MS45LTI0NS0yODUuNC0yNDUtMjg1LjRMMzUxLjYsNzQ5Ljh6Ii8+CjxjaXJjbGUgY2xhc3M9InN0MSIgY3g9IjEyNTAiIGN5PSIxMjUwIiByPSIxMjguNiIvPgo8ZWxsaXBzZSBjbGFzcz0ic3QxIiBjeD0iMTc3Ny4zIiBjeT0iNzU2LjIiIHJ4PSI3NC42IiByeT0iNzcuMiIvPgo8ZWxsaXBzZSBjbGFzcz0ic3QxIiBjeD0iNTUzIiBjeT0iMTAxOC41IiByeD0iNzQuNiIgcnk9Ijc3LjIiLz4KPGVsbGlwc2UgY2xhc3M9InN0MSIgY3g9IjEwOTguMiIgY3k9IjE5NjUiIHJ4PSI3NC42IiByeT0iNzcuMiIvPgo8L3N2Zz4K -[cosmos-link]: https://cosmos.network +Moved to [centauri](https://github.com/composableFi/centauri) diff --git a/ci/.gitignore b/ci/.gitignore deleted file mode 100644 index 699ff5ada7..0000000000 --- a/ci/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -/gaia/**/* -data/**/* -*.log \ No newline at end of file diff --git a/ci/README.md b/ci/README.md deleted file mode 100644 index 087686ece8..0000000000 --- a/ci/README.md +++ /dev/null @@ 
-1,230 +0,0 @@ -# Continous Integration for ibc-rs - -This folder contains the files required to run the End to end testing in [Github Actions](https://github.com/informalsystems/ibc-rs/actions) - -## End to end (e2e) testing - -The [End to end (e2e) testing workflow](https://github.com/informalsystems/ibc-rs/actions?query=workflow%3A%22End+to+End+testing%22) spins up two `gaia` chains (`ibc-0` and `ibc-1`) in Docker containers and one container that runs the relayer. There's a script that configures the relayer (e.g. configure light clients and add keys) and runs transactions and queries. A successful run of this script ensures that the relayer is working properly with two chains that support `IBC`. - -### Testing Ethermint-based networks -At this moment, the automated E2E workflow does not spin up a network with the (post-Stargate) Ethermint module. In the meantime, you can test it manually by following one of the resources below: - -- [the official documentation on ethermint.dev](https://docs.ethermint.zone/quickstart/run_node.html) -- [using the tweaked E2E scripts from the Injective's fork](https://github.com/InjectiveLabs/ibc-rs/commit/669535617a6e45be9916387e292d45a77e7d23d2) -- [using the nix-based integration test scripts in the Cronos project](https://github.com/crypto-org-chain/cronos#quitck-start) - -### Running an End to end (e2e) test locally - -If you want to run the end to end test locally, you will need [Docker](https://www.docker.com/) installed on your machine. - -Follow these steps to run the e2e test locally: - -__Note__: This assumes you are running this the first time, if not, please ensure you follow the steps to [clean up the containers](#cleaning-up) before running it again. - -1. Clone the `ibc-rs` repo. - -2. Go into the repository folder: - - `cd ibc-rs` - -3. Build the current up-to-date relayer container image. Make sure you have a - copy of the `hermes` binary in the root of the repo, as the docker script - expects this. 
If you don't, you can run `cp ./target/debug/hermes .`. This - command might take a few minutes since it will do a fresh compile and build - of all modules: - - `docker-compose -f ci/docker-compose-gaia-current.yml build relayer` - -> Note: If you're looking to test on a legacy version of gaia, run: -> `docker-compose -f ci/docker-compose-gaia-legacy.yml build relayer`. -> You'll use the `docker-compose-gaia-legacy.yml` configuration instead of -> `docker-compose-gaia-current.yml` in all cases. - -4. Run all the containers (two containers, one for each chain and one for the relayer) - - `docker-compose -f ci/docker-compose-gaia-current.yml up -d ibc-0 ibc-1 relayer` - - If everything is successful you should see among the output something - similar to this: - ```shell - Network ci_relaynet Created - Container ibc-1 Started - Container ibc-0 Started - Container relayer Started - ``` - - If you want to ensure they are all running properly you can use the command: - - `docker-compose -f ci/docker-compose-gaia-current.yml ps` - - You should see the message below. Please ensure all containers are in a `running` state. - ```shell - - ibc-0 "/chain/gaia/run-gaia..." ibc-0 running - ibc-1 "/chain/gaia/run-gaia..." ibc-1 running - relayer "/bin/sh" relayer running - ``` - - __Note__: If this is the first time you're running this command, the `informaldev/ibc-0:[RELEASE TAG]` and `informaldev/ibc-1:[RELEASE TAG]` container images will be pulled from the Docker Hub. For instructions on how to update these images in Docker Hub please see the [upgrading the release](#upgrading-chains) section. - - -5. Run the command below to execute the relayer end to end (e2e) test. This command will execute the `e2e.sh` on the relayer container. The script will configure the light clients for both chains, add the private keys for both chains and run transactions on both chains (e.g. create-client transaction). 
- - `docker exec relayer /bin/sh -c /relayer/e2e.sh` - - If the script runs sucessfully you should see an output similar to this one: -```shell -================================================================================================================= - INITIALIZE -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Show relayer version ------------------------------------------------------------------------------------------------------------------ -relayer-cli 0.2.0 ------------------------------------------------------------------------------------------------------------------ -Setting up chains ------------------------------------------------------------------------------------------------------------------ -Config: /relayer/simple_config.toml - Chain: ibc-0 - creating chain store folder: [/data/ibc-0] - Chain: ibc-1 [/data/ibc-1] - creating chain store folder: [/data/ibc-1] -Waiting 20 seconds for chains to generate blocks... 
-================================================================================================================= - CONFIGURATION -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Add keys for chains ------------------------------------------------------------------------------------------------------------------ -key add result: "Added key testkey (cosmos1tc3vcuxyyac0dmayf887v95tdg7qpyql48w7gj) on ibc-0 chain" -key add result: "Added key testkey (cosmos1zv3etpuk4n7p54r8fhsct0qys8eqf5zqw4pqp5) on ibc-1 chain" ------------------------------------------------------------------------------------------------------------------ -Set the primary peers for clients on each chain ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-0:26657 -c ibc-0 -s /data/ibc-0 -p -y -f - Success Added light client: - chain id: ibc-0 - address: tcp://ibc-0:26657 - peer id: DF766B47325BE49E27F9DF325327853AAFB5BBCA - height: 731 - hash: 22639F0B84C0E95D51AB70D900E7BC0CBFBDF642F3F945093FF7AEB8120CC8DC - primary: true ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-1:26657 -c ibc-1 -s /data/ibc-1 -p -y -f - Success Added light client: - chain id: ibc-1 - address: tcp://ibc-1:26657 - peer id: B7A2809AA8AA825225D618DDD5200AB9BA236797 - height: 733 - hash: D5C190747A1A0805C4840FBF66EC3339E0C30520359EF36553508DBD775A6EEF - primary: true ------------------------------------------------------------------------------------------------------------------ -Set the secondary peers for clients on each chain 
------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-0:26657 -c ibc-0 -s /data/ibc-0 --peer-id 17D46D8C1576A79203A6733F63B2C9B7235DD559 -y - Success Added light client: - chain id: ibc-0 - address: tcp://ibc-0:26657 - peer id: 17D46D8C1576A79203A6733F63B2C9B7235DD559 - height: 735 - hash: 463691EED61772C333D38C5DC5F267946341F98ADE8EF9FBBE501A96022E5F1A - primary: false ------------------------------------------------------------------------------------------------------------------ -Executing: hermes -c /relayer/simple_config.toml light add tcp://ibc-1:26657 -c ibc-1 -s /data/ibc-1 --peer-id A885BB3D3DFF6101188B462466AE926E7A6CD51E -y - Success Added light client: - chain id: ibc-1 - address: tcp://ibc-1:26657 - peer id: A885BB3D3DFF6101188B462466AE926E7A6CD51E - height: 737 - hash: 5377D2FDCD1886129AF32AABFDFC4C80112B2465F49814E91C25FD325DE54DCC - primary: false -================================================================================================================= - CLIENTS -================================================================================================================= ------------------------------------------------------------------------------------------------------------------ -Create client transactions ------------------------------------------------------------------------------------------------------------------ -Creating ibc-1 client on chain ibc-0 -Message CreateClient for source chain: ChainId { id: "ibc-1", version: 1 }, on destination chain: ChainId { id: "ibc-0", version: 0 } -Jan 21 18:46:57.259 INFO relayer::event::monitor: running listener chain.id=ibc-1 -Jan 21 18:46:57.278 INFO relayer::event::monitor: running listener chain.id=ibc-0 
-{"status":"success","result":[{"CreateClient":{"client_id":"07-tendermint-0","client_type":"Tendermint","consensus_height":{"revision_height":739,"revision_number":1},"height":"1"}}]} ------------------------------------------------------------------------------------------------------------------ -Creating ibc-0 client on chain ibc-1 -Message CreateClient for source chain: ChainId { id: "ibc-0", version: 0 }, on destination chain: ChainId { id: "ibc-1", version: 1 } -Jan 21 18:46:58.299 INFO relayer::event::monitor: running listener chain.id=ibc-0 -Jan 21 18:46:58.324 INFO relayer::event::monitor: running listener chain.id=ibc-1 -{"status":"success","result":[{"CreateClient":{"client_id":"07-tendermint-0","client_type":"Tendermint","consensus_height":{"revision_height":740,"revision_number":0},"height":"1"}}]} -``` - -### [Cleaning up](#cleaning-up) - -In order to clear up the testing environment and stop the containers you can run the command below - -`docker-compose -f ci/docker-compose-gaia-current.yml down` - -If the command executes successfully you should see the message below: - -```shell -Container relayer Removed -Container ibc-1 Removed -Container ibc-0 Removed -Network ci_relaynet Removed -``` - -### [Upgrading the gaia chains release and generating new container images](#upgrading-chains) - -The repository stores the files used to configure and build the chains for the containers. For example, the files for a `gaia` chain release `v5.0.0` can be seen [here](./chains/gaia) - -> Note: Please ensure you have gaiad installed on your machine and it matches the version that you're trying to upgrade. -> You can check by running `gaiad version` in your machine. -> -> If you need to upgrade your local `gaiad` binary, you can follow the steps -> listed in the Cosmos Hub documentation on [installing the binary](https://hub.cosmos.network/main/getting-started/installation.html). 
- -If you need to generate configuration files for a new gaia release and new containers, please follow the steps below: - -1. Move into the `ci` folder - - `cd ci` - - -2. Open the `build-ibc-chains.sh` file and change the release. Just replace the value for the `GAIA_BRANCH` parameter. For example to set it to release `v5.0.0` use: - `GAIA_BRANCH="v5.0.0"` - - -3. Run the `build-ibc-chains.sh` script: - - `./build-ibc-chains.sh` - - -__Note__: This will generate the files for the chains in the `/ci/chains/gaia` folder and build the Docker containers. At the end of the script it will ask if you want to push these new images to Docker Hub. In order to do so you need to have Docker login configured on your machine with permissions to push the container. If you don't want to push them (just have them built locally) just cancel the script execution (by hitting `CTRL+C`) - - -4. Committing the release files. **You have to** add the new chain files generated to the ibc-rs repository, just `git commit` the files, otherwise the CI might fail because private keys don't match. - - -5. Update the release for Docker Compose. If this new release should be the default release for running the end to end (e2e) test you need to update the release version in the `docker-compose.yml` file in the `ci` folder of the repository. Open the file and change the release version in all the places required (image name and RELEASE variables. For example, if current release is `v4.0.0` and the new one is `v5.0.0` just do a find and replace with these two values. - -Change the version in the image for ibc-0 and ibc-1 services: - - ``` - image: "informaldev/ibc-0:v4.0.0" - ``` - -And in the relayer service: - - ``` - args: - RELEASE: v4.0.0 - ``` - -6. 
Update the CI workflow - -There are currently two CI workflows, for running the E2E tests against two versions of gaiad: - - legacy release: `.github\workflows\e2e-gaia-legacy-release.yaml`, and - - current release: `.github\workflows\e2e-gaia-current-release.yaml`. - -Depending on which of the two setups you have upgraded at the prior steps, change the `name` key in the corresponding workflow file to match with the version of the upgraded gaia used, e.g.: - -`name: End to End testing (Gaia - v6.0.0)` diff --git a/ci/bootstrap_gaia.sh b/ci/bootstrap_gaia.sh deleted file mode 100755 index c1f9cd4509..0000000000 --- a/ci/bootstrap_gaia.sh +++ /dev/null @@ -1,94 +0,0 @@ -#!/bin/sh - -# This file can be used to initialize a chain - -# coins to add to each account -coins="100000000000stake,100000000000samoleans" -STAKE="100000000000stake" -# - the user also needs stake to perform actions -USER_COINS="${STAKE},${SAMOLEANS}samoleans" -#home="/chain" - -echo Node: "$MONIKER" -echo Chain: "$CHAIN_ID" -echo Chain IP: "$CHAIN_IP" -echo RPC Port: "$RPC_PORT" -echo GRPC Port: "$GRPC_PORT" -echo Home_Dir: "$CHAIN_HOME" - -# Clean home dir if exists -rm -Rf "$CHAIN_HOME" - -# Create home dir -mkdir -p "$CHAIN_HOME" - -ls -allh "$CHAIN_HOME" - -# Check gaia version -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Gaiad version" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad version --long - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Initialize chain $CHAIN_ID" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad init "$MONIKER" --chain-id "$CHAIN_ID" --home "$CHAIN_HOME" - -echo 
"-------------------------------------------------------------------------------------------------------------------" -echo "Replace addresses and ports in the config file and some performance tweaks" -echo "-------------------------------------------------------------------------------------------------------------------" -sed -i 's#"tcp://127.0.0.1:26657"#"tcp://'"$CHAIN_IP"':'"$RPC_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -#sed -i 's#"tcp://0.0.0.0:26656"#"tcp://'"$CHAIN_ID"':'"$P2P_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -#sed -i 's#grpc_laddr = ""#grpc_laddr = "tcp://'"$CHAIN_IP"':'"$GRPC_PORT"'"#g' "$CHAIN_HOME"/config/config.toml -sed -i 's/timeout_commit = "5s"/timeout_commit = "1s"/g' "$CHAIN_HOME"/config/config.toml -sed -i 's/timeout_propose = "3s"/timeout_propose = "1s"/g' "$CHAIN_HOME"/config/config.toml -sed -i 's/index_all_keys = false/index_all_keys = true/g' "$CHAIN_HOME"/config/config.toml - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding validator key" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad keys add validator --keyring-backend="test" --home "$CHAIN_HOME" --output json > "$CHAIN_HOME"/validator_seed.json -cat "$CHAIN_HOME"/validator_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user key" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad keys add user --keyring-backend="test" --home $CHAIN_HOME --output json > "$CHAIN_HOME"/user_seed.json -cat "$CHAIN_HOME"/user_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user2 key" -echo 
"-------------------------------------------------------------------------------------------------------------------" -gaiad keys add user2 --keyring-backend="test" --home $CHAIN_HOME --output json > "$CHAIN_HOME"/user2_seed.json -cat "$CHAIN_HOME"/user2_seed.json - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show user -a) 1000000000stake -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding user2 account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show user2 -a) 1000000000stake -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Adding validator account to genesis" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" add-genesis-account $(gaiad --home "$CHAIN_HOME" keys --keyring-backend="test" show validator -a) 1000000000stake,1000000000validatortoken -echo "Done!" 
- -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Generate a genesis transaction that creates a validator with a self-delegation" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad --home "$CHAIN_HOME" gentx validator 1000000000stake --keyring-backend="test" --chain-id "$CHAIN_ID" -echo "Done!" - -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Collect genesis txs and output a genesis.json file" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad collect-gentxs --home "$CHAIN_HOME" -echo "Done!" diff --git a/ci/build-chains.sh b/ci/build-chains.sh deleted file mode 100755 index 2ec002b273..0000000000 --- a/ci/build-chains.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating Gaia Hub chains for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependecies and tasks. -## Prerequisites: Log into Docker Hub -set -eou pipefail -GAIA_BRANCH="v7.0.1" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -echo "*** Requirements" -which git && which go && which make && which sed && which jq && which docker - -echo "*** Fetch gaiad source code" -git clone https://github.com/cosmos/gaia || echo "Already cloned." 
-cd gaia -git checkout "${GAIA_BRANCH}" -q - -echo "*** Build binary" -GOOS=linux CGO_ENABLED=0 make build - -echo "*** Create config using the built binary" -docker run -it --rm -v $(pwd)/build:/root:z alpine /root/gaiad testnet -o /root/chain_a --v 1 --chain-id chain_A --keyring-backend test -sed -i.bak -e 's/^index_all_keys[[:space:]]*=.*/index_all_keys = true/' build/chain_a/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_commit[[:space:]]*=.*/timeout_commit = "1s"/' build/chain_a/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_propose[[:space:]]*=.*/timeout_propose = "1s"/' build/chain_a/node0/gaiad/config/config.toml - -docker run -it --rm -v $(pwd)/build:/root:z alpine /root/gaiad testnet -o /root/chain_b --v 1 --chain-id chain_B --keyring-backend test -sed -i.bak -e 's/^index_all_keys[[:space:]]*=.*/index_all_keys = true/' build/chain_b/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_commit[[:space:]]*=.*/timeout_commit = "1s"/' build/chain_b/node0/gaiad/config/config.toml -sed -i.bak -e 's/^timeout_propose[[:space:]]*=.*/timeout_propose = "1s"/' build/chain_b/node0/gaiad/config/config.toml - -echo "*** Create Docker image and upload to Docker Hub" -cd .. -docker build -t informaldev/chain_a -f chain_a.Dockerfile . -docker build -t informaldev/chain_b -f chain_b.Dockerfile . - -# Get details from the config files -echo SECRET_A=$(jq -r .secret gaia/build/chain_a/node0/gaiad/key_seed.json) -echo SECRET_B=$(jq -r .secret gaia/build/chain_b/node0/gaiad/key_seed.json) -echo NODEID_A=$(jq -r .app_state.genutil.gen_txs[0].body.memo gaia/build/chain_a/node0/gaiad/config/genesis.json) -echo NODEID_B=$(jq -r .app_state.genutil.gen_txs[0].body.memo gaia/build/chain_b/node0/gaiad/config/genesis.json) - -read -p "Press ENTER to push image to Docker Hub or CTRL-C to cancel. 
" dontcare -docker push informaldev/chain_a -docker push informaldev/chain_b diff --git a/ci/build-ibc-chains.sh b/ci/build-ibc-chains.sh deleted file mode 100755 index 0fbcbde456..0000000000 --- a/ci/build-ibc-chains.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating Gaia Hub chains for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependencies and tasks. -## Prerequisites: Log into Docker Hub -set -eou pipefail - -## After updating the gaia version below, double-check the following (see readme.md also): -## - the new version made it to docker hub, and is available for download, e.g. `docker pull informaldev/ibc-1:v4.0.0` -## - the image versions and the relayer release in `docker-compose.yml` are consistent with the new version - -# For building current gaia use this -# GAIA_BRANCH="v5.0.8" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -# For future gaia use this -GAIA_BRANCH="v7.0.1" # Requires a version with the `--keyring-backend` option. v2.1 and above. - -# Check if gaiad is installed and if the versions match -if ! [ -x "$(command -v gaiad)" ]; then - echo 'Error: gaiad is not installed.' 
>&2 - exit 1 -fi - -CURRENT_GAIA="$(gaiad version 2>&1)" -echo "Current Gaia Version: $CURRENT_GAIA" - -if [ "$GAIA_BRANCH" != "$CURRENT_GAIA" ]; then - echo "Error: gaiad installed is different than target gaiad ($CURRENT_GAIA != $GAIA_BRANCH)" - exit 1 -else - echo "Gaiad installed matches desired version ($CURRENT_GAIA = $GAIA_BRANCH)" -fi - -BASE_DIR="$(dirname $0)" -ONE_CHAIN="$BASE_DIR/../scripts/one-chain" - -echo "*** Building config folders" - -CHAIN_HOME="./chains/gaia/$GAIA_BRANCH" - -# Clean home dir if exists -rm -Rf "$CHAIN_HOME" - -# Create home dir -mkdir -p "$CHAIN_HOME" - -ls -allh "$CHAIN_HOME" - -# Check gaia version -echo "-------------------------------------------------------------------------------------------------------------------" -echo "Gaiad version" -echo "-------------------------------------------------------------------------------------------------------------------" -gaiad version --long --log_level error - -MONIKER=node_ibc_0 -CHAIN_ID=ibc-0 -CHAIN_IP=172.25.0.10 -RPC_PORT=26657 -GRPC_PORT=9090 -CHAIN_SAMOLEANS=100000000000 -"$ONE_CHAIN" gaiad "$CHAIN_ID" "$CHAIN_HOME" "$RPC_PORT" 26656 6060 "$GRPC_PORT" "$CHAIN_SAMOLEANS" - -MONIKER=node_ibc_1 -CHAIN_ID=ibc-1 -CHAIN_IP=172.25.0.11 -RPC_PORT=26657 -GRPC_PORT=9090 -CHAIN_SAMOLEANS=100000000000 -"$ONE_CHAIN" gaiad "$CHAIN_ID" "$CHAIN_HOME" "$RPC_PORT" 26656 6060 "$GRPC_PORT" "$CHAIN_SAMOLEANS" - -echo "*** Requirements" -which docker - -echo "*** Create Docker image and upload to Docker Hub" -docker build --build-arg CHAIN=gaia --build-arg RELEASE=$GAIA_BRANCH --build-arg NAME=ibc-0 --no-cache -t informaldev/ibc-0:$GAIA_BRANCH -f "$BASE_DIR/gaia.Dockerfile" . -docker build --build-arg CHAIN=gaia --build-arg RELEASE=$GAIA_BRANCH --build-arg NAME=ibc-1 --no-cache -t informaldev/ibc-1:$GAIA_BRANCH -f "$BASE_DIR/gaia.Dockerfile" . - -read -p "Press ANY KEY to push image to Docker Hub, or CTRL-C to cancel. 
" dontcare -docker push informaldev/ibc-0:$GAIA_BRANCH -docker push informaldev/ibc-1:$GAIA_BRANCH diff --git a/ci/build-simd.sh b/ci/build-simd.sh deleted file mode 100755 index 390cda7161..0000000000 --- a/ci/build-simd.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -## Programmatic list for creating a simd chain for testing IBC. -## Instead of blindly running this code, read it line by line and understand the dependecies and tasks. -## Prerequisites: Log into Docker Hub -set -eou pipefail -GAIA_BRANCH="master" - -echo "*** Requirements" -which git && which go && which make && which docker - -echo "*** Fetch gaiad source code" -git clone https://github.com/cosmos/cosmos-sdk || echo "Already cloned." -cd cosmos-sdk -git checkout "${GAIA_BRANCH}" -q - -echo "*** Build binary" -GOOS=linux make build-simd - -echo "*** Create Docker image and upload to Docker Hub" -cd .. -docker build -t informaldev/simd -f simd.Dockerfile . -read -p "Press ENTER to push image to Docker Hub or CTRL-C to cancel. 
" dontcare -docker push informaldev/simd diff --git a/ci/chain_a.Dockerfile b/ci/chain_a.Dockerfile deleted file mode 100644 index 9eae1fcb5a..0000000000 --- a/ci/chain_a.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 - -ENTRYPOINT ["/usr/bin/gaiad"] - -CMD ["start"] - -VOLUME [ "/root" ] - -COPY gaia/build/gaiad /usr/bin/gaiad -COPY gaia/build/chain_a/node0/gaiad /root/.gaia -COPY gaia/build/chain_a/node0/gaiad/key_seed.json /root/key_seed.json diff --git a/ci/chain_b.Dockerfile b/ci/chain_b.Dockerfile deleted file mode 100644 index 7afbbacfcf..0000000000 --- a/ci/chain_b.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 - -ENTRYPOINT ["/usr/bin/gaiad"] - -CMD ["start"] - -VOLUME [ "/root" ] - -COPY gaia/build/gaiad /usr/bin/gaiad -COPY gaia/build/chain_b/node0/gaiad /root/.gaia -COPY gaia/build/chain_b/node0/gaiad/key_seed.json /root/key_seed.json diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. 
-# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). 
-rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml deleted file mode 100644 index 03b013dc29..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator 
-priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configurattion Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json deleted file mode 100644 index 617b5db01a..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "genesis_time": "2021-01-29T14:17:55.021324268Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u", - "validator_address": "cosmosvaloper1atnl2j4w2adr7xpa8kercfrr55fg4kcfnaqja0", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": 
"d2c710169cd596180da34bc6dde5d28fd663e48c@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A0SaYYx4qCJyjdew0+nHDykQMNQ8mXzuj6MM5JAaY3oC" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "MukV92ZBXAEPxNl+mHk7Gu+NRflkxmAOEmQPoyOS/Akz0vlVkHCgUagdiHpDpRNLL/6v18w9IA7BAaSht6233w==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - 
"blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json b/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json deleted file mode 100644 index 8604a7903f..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/gentx/gentx-d2c710169cd596180da34bc6dde5d28fd663e48c.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u","validator_address":"cosmosvaloper1atnl2j4w2adr7xpa8kercfrr55fg4kcfnaqja0","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"d2c710169cd596180da34bc6dde5d28fd663e48c@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A0SaYYx4qCJyjdew0+nHDykQMNQ8mXzuj6MM5JAaY3oC"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["MukV92ZBXAEPxNl+mHk7Gu+NRflkxmAOEmQPoyOS/Akz0vlVkHCgUagdiHpDpRNLL/6v18w9IA7BAaSht6233w=="]} diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json b/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json deleted file mode 100644 index a34f5bed89..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"6uGEFm3VE1D/rPPet3qiUZJdMT/aVF2fmU/Yc5kqnlJ6uhSV/KX+Wdu9uHUjfg26zFdA4xByM9eyRHZNUEp8+Q=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index d5077632d0..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "11BA3491738A2B92BED24B71332AD77F00024369", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"pOI/9x+U+bRejes4VPOlHl2OALAaG6yUoebv5gmXdww=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "p4xRp3MT8KEPctLJWuY7oOw5sAuVazyY3+hBvmUrXpqk4j/3H5T5tF6N6zhU86UeXY4AsBobrJSh5u/mCZd3DA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json b/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json deleted file mode 100644 index ab144f2101..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1g88sephz2r6ym6aqjgfaflknhhpj4reev9k28z","pubkey":"cosmospub1addwnpepqt3t47qa3hzpf9au0nrxhs0h2tu7g5utlpl3nfka98m73za9h2gs6lrm4k9","mnemonic":"odor sibling reason pistol carry injury end mountain gauge velvet trouble beef casual find cigar before pen drift noise meat slush pistol chronic wool"} diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address deleted file mode 100644 index bca91e0431..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/41cf0c86e250f44deba09213d4fed3bdc32a8f39.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ny40NDI3NTI1NzEgLTA1MDAgRVNUIG09KzAuMTg0NjA2MjI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN0F6d0dMeWZmNXloTDB1ZiJ9.aZw80hDsm42xGZWfEt7_cp4qTV1Y9L-9KYFWdlf2Z4zPjISmDmSHOA.xCs1eEcwnMasy0jx.7yonWaNsLRaKXDa3K8FhhbOrQGyCh1765MgDuwV7EMFxlywGzpnhpB0_PadOytHs89i25fuwfPvw8KGIUYbBrF-9NE6E76dtcBSd49Fcg8Sq9UdYEyBG4C__TgK-6dWD8I3kn4TLVZ8yt87U0OH7vQfMqrhceCzQvZhWro6TcEUBQpbo4QZZDIEH_TA81z_JWNzEreXZ3F-auQHObl238jwCVzry55W1-btoHZV3L9NLYQ.jQaUCrcV9ZU7XeJ6BVj28g \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address deleted file mode 100644 index 0a2f5f945a..0000000000 
--- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/eae7f54aae575a3f183d3db23c2463a5128adb09.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ni4yMjczNDUxMzIgLTA1MDAgRVNUIG09KzAuMTkzNzA4NTA0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNUp5RmZUQ2JLWVlxUkNqNCJ9.PAlCYv_dN1e4tGyNVeQlsXheYzOEvacBj4N94SrBd3Q0NIsnPShJoA.AH6A91TU4D74ONPs.BiMujTsrIou2KpIWQ_XRMgjPO_QThzujRVHmFgL5VBe4OhwKI86XwQdPtib8BQHSHId-oGHuIoYZ9pzsRZXDZdDZ6uTAX2Agq8s9CMRcT9tDGfgFTiQs3zPbO1joL1BrYSqGtr_b75Wems159TzQUGI1yZZcJgqbLswSg03ZEEZDxFoCUga7q6RPS0B7cWZx3o7GdBAx-iObsdmNp9h2Ok6qAf6RzTAqSJDgKFBdJLnt4DDwe1H530ik.7JFKm6ZogyEfJHgNAIpvzg \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 584392be6e..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ny40MjU3NzU4MTcgLTA1MDAgRVNUIG09KzAuMTY3NjI5NDQ3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicFM2Ty1XVXB6eXhQQXN1TSJ9.w5_RrMMhj-33GL6dn07zRrX-hv92nMREh2c0-sTg6afQdFtoyJ9Sjw.uT0r5JSAcITVV072.38D79yPbUn8pElVmfiGtbZDwx1Ii6WWsQGdHFmeooNzp8Qa9XnbjgOhr5bbcb1CRGGY9DnR9VI_8EMF0tskC2b82ZsSbS90HoqvwFXRLELrI3avI2P8RDSH60SsOPYHhBHFHtRYSnA6oxjamy9nfZ7bbM_WU0HZ2jWio2cptpuGbXzyE3TFPrOvyioTp5KUh80gkD0cuKq2NhNnH_wHxWURSj3UxEdTBFXFvXn70oLD3rj8MvVr4-1cXPfV1GI7MLaEkLT8BH9RNBkaKyOSYLm1Id_2bCDJJw-14ka3JorihwfbLmrbF9uEkXIp3zdnzY-mG-Mh51nWzUt3seJQ-C8gqE8FE-1U.j_H_QFmqyTl6o97oxeTN8g \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 931a9f1251..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxNzo1Ni4yMDY4NDUxOTYgLTA1MDAgRVNUIG09KzAuMTczMjA4NDU1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZWVyTVRDZ1FkT3hobzRJbCJ9.fLybsw-F-KD13P8G7I99yJwb3htJYjf2gR11_Ek27nSGPGNgJOXKZg.Pf0kNyUPLuqW0k5y.hDrnr6Cfu0m3dPLXr7UONiuWsGnx5gwQW6GWzZk3hYPd2ziAYOt79fBSwodY1VVohHftzZgwzvO07eBcvK6HzAB4e4uk_MME2uxHoS8gEC5FvwoJyKgSLYnMcJmK8XRUjgAKL9LV6ptwF0YmmxL_50I3N2qDsVRwldEqIiNm9HmuQ2PhfUPbnWfl0lMPIZK9pY4KDyA-Aw35vtEgF1A9ns2CsteRxJnC31apqfrirHSk127K-P7n04caAx033TjGIKVTLOgEdjCHCB4xJ9BIlkxyy1KFgelJrrwOkxkBKRFyHOtfs4c__nk5d8arZiKTzLnwju_s2CEF9cB4iRmyqhbaGTfejtTAnHalosBKs_qwt2F1.Oa8WLImfXbdXsJi4PWajgw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json b/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json deleted file mode 100644 index ea4713ffea..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1atnl2j4w2adr7xpa8kercfrr55fg4kcfkf583u","pubkey":"cosmospub1addwnpepqdzf5cvv0z5zyu5d67cd86w8pu53qvx58jvhem505vxwfyq6vdaqy6tfsfg","mnemonic":"frown boost win vacuum wink excuse frog cube visit cousin hover cook broccoli gift usual useful fork vault during name dog suspect much another"} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 
0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. 
-min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. 
-max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml deleted file mode 100644 index 1ba972916c..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. 
"data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol 
-priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. 
See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Migth be either absolute path or path related to tendermint's config directory. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configurattion Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 35096d1758..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,285 +0,0 @@ -{ - "genesis_time": "2021-01-29T14:18:07.030169146Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr", - "validator_address": "cosmosvaloper1y8sa4m3pv69nlfraxuh2zg2ghvua2wfdg88r6s", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": 
"c15eddb601e66c008c241ccfc80f7cf3358cfa7f@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AwgqmhChUIpyAo157HyCorxiB8s0KKH0UtoHYazZdZal" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "4mnUAljJh9m6Wd1Yl7aLDwoR4gezH9ugjSW4SqSOtp5FCnUoX+UR3oAR0kQpzX5S4qLbV4nfANmwj6Cgw9hrOw==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - 
"blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json b/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json deleted file mode 100644 index 526c658967..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/gentx/gentx-c15eddb601e66c008c241ccfc80f7cf3358cfa7f.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr","validator_address":"cosmosvaloper1y8sa4m3pv69nlfraxuh2zg2ghvua2wfdg88r6s","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"c15eddb601e66c008c241ccfc80f7cf3358cfa7f@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AwgqmhChUIpyAo157HyCorxiB8s0KKH0UtoHYazZdZal"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["4mnUAljJh9m6Wd1Yl7aLDwoR4gezH9ugjSW4SqSOtp5FCnUoX+UR3oAR0kQpzX5S4qLbV4nfANmwj6Cgw9hrOw=="]} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json b/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 39c903be39..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"V6YnjXHp7KVvKGjeiVnpo2gXYTz/L7nlxBqJACkGIejLNOwsxmAX3JtFYT0tmUXBdFtVsv8V9oCl1tdHRnLtkQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 4ba202f381..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "574E7B0DC5C0C1D726059ADC9CF3EDCF06815C46", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"Sl/6UDZu7Ttd9MOsUnMO4xswFYfVbXyUDRRNbAXH2kQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "EteHYnSwj1wLWwDYKJpnxOE1aeEDrlpOTVE7IMCb4idKX/pQNm7tO130w6xScw7jGzAVh9VtfJQNFE1sBcfaRA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json b/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json deleted file mode 100644 index cef64cfb7f..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1y5yvcjvzjukdrk3993xlumt05468mytrwt5v4j","pubkey":"cosmospub1addwnpepqvfmqqc0ghq7rjwe5kqyg2n9d9xclfcwvvcvemscqxqdkh68xcqfsaz5m4c","mnemonic":"stick blossom laugh account diamond tribe tool joy fox anger strategy trash vendor eager clog core office device patch install turkey muscle conduct city"} diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address deleted file mode 100644 index 18fabf8b58..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/21e1daee21668b3fa47d372ea12148bb39d5392d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOC4xODA5NDQ5MjQgLTA1MDAgRVNUIG09KzAuMTM3NjA0NTE3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiWUNHNnFUekJtVVVqZExiTCJ9.MZFL8-n9JInB8KXYiYrgUbowFxAn1Phgh-GjJ7ZkqKuHP4LQxe6H5g.xYWRM_9wTncgcwPM.RTCU1R9HB_21CeQ19AB7tzqM02iXYw6zJzMJNQ1HXWVu95CtDcpY5Pq4qfLD_C4G7CLOWfIFa80MOVpamL6oY5LoilnRd7HGAiI-DBFuMTJkDJfjrtOuU6I4eo9s3O7ixB4JvxoKI9pDlXv9lXkAQwBwAvQQ0VSZItm2JuQp0NxFb0cIVX7rx6_lBTq5YPg10vrjp3QQ9zssGH1Ljc7PhB3NUe20lGrY3g_1GvrBqt3hhJIih03Ai3Fv.w1RB9aOyRJDHA_dA876pBQ \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address deleted file mode 100644 index 
001e62a194..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/2508cc4982972cd1da252c4dfe6d6fa5747d9163.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOS4zNzI3NzQxOTMgLTA1MDAgRVNUIG09KzAuMTc1NzQzMjU2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSUlBb3k0ZzV2ZlVRbV9sSSJ9.V7DHUp6H5PWzVX2xoS6MFpIAElsf0OpA2sD6xlPWRQCEKzCy3NGBdQ.nZ8t10M-8IxPvrI_.Fl0jClobEpAwJ-TOyj3dmbaMW2wRo_w8GP8PXnfUOaFJVWvrIW0yrlYp-M8r5zR1sa0OQy3QnOuu63xVMw9ouGqa1NznLHOjqi1CGP4dWrCFe9a1-owLk28U5lYIrI58kJWaor7REJjcypG0Y4D8eum2n4tXx82C-z_M3tHvhBrMUCONSPcjr36HxkXlc3RQYLn13QH6Qlrq6oxF3W51qN3LmJgtaStAnpEpn8LsABurRQ.4vt3nXFsGLbzAJ0EjW3KJw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 index f9e01b2aed..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOS4zNTYxMTkwMDMgLTA1MDAgRVNUIG09KzAuMTU5MDg4MDM2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVGJQcUZETEwxTWRTZGhleiJ9._pcGFxyxfLg910MktjRBqMqgfYa8xijq5HHYwJ2LnCC3a69LX4woMw.Ubual1F2FZV3gW2T.S9nk-LnI3fcvbXcTzyj_ghMAK14dZC0JMPqwRtwiXD9fhOlNBnWrKrACS1OYi2P3_KbA9F3diFnSSiCj77DW0HTw1Ez6bQf0nFOP1usveK1EzXFSnuhWqRa8xGjLtaggN5m_wdeC8kWimKXAaigqxGVwHkcfx3EzMI0GLArkPPXBIzs4uOXnsMn98GDLlN85iPEWEBiIhHerRiI0V5ftdpDNeW2ZWgj5GXafxpALAMve38jlob79Hplq73PJqSMmRu-Yczr0YxwErhKeZqFCjaVl6bCoxQo9pASsEsc9gJNrBewHGQAZnOXKFSaJzcePGCwlROf-isxqdxBnlqUekg5nlRsb4sc.xmoB_vY5tCmBQ6eqYitrHw \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index d8c9886640..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMS0yOSAwOToxODowOC4xNzQ5ODM0MTcgLTA1MDAgRVNUIG09KzAuMTMxNjQzMDEwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoid0h4dDRWbFMxVDV3TGs3ZSJ9.8D3_glu4tLCAGBAIYciFA5gWr9TlSyw28IjQqBg72WqQ9weaVNCdMg.DqW1vdFKv8LMYlHY.AvlW-jmxVZuQZ1i1EOazHlDBmQZJUxQGhM1fJ3AMNMD9cFkC65TF97KhcNUE2Tpe8xMpMTOPz6Ys8gfs4hE-LhkyQ_y_pWw9y-Pz3vC1jA7dRrRYJ-YKV29PdFGHXTyp_hnlAyFEaJ8c-sjjVs1Hz1szSfyNKWzG9w4rRJdGPtFeEbb9TjhXLDoAWAdXYIFOZ2qjKl5XFycMbhlacHO9FMQ5rS1xcFUhL1wF4_wxRvbZ3CqhQXxsVb4mlb8kMXRAkLYoPUjKIAez0J0arMc_2DscfTT5kt9XUjcst8zs5ssBMBgpVHmaB4pxlUNUiSQwiKJ3lA4m0iturK-csJ82CUL5LTJSIq1GpqlSMK27XbOEQXs5.g4pBKgKgTOGnGowaK-kx1A \ No newline at end of file diff --git a/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json deleted file mode 100644 index ff4f3ed72c..0000000000 --- a/ci/chains/gaia/v3.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1y8sa4m3pv69nlfraxuh2zg2ghvua2wfddnnkkr","pubkey":"cosmospub1addwnpepqvyz4xss59gg5usz34u7clyz527xyp7txs52razjmgrkrtxewkt22myg9hq","mnemonic":"hill corn gravity veteran pond unaware sure blame fringe enable torch myself hint tag provide entire first auto hidden tide like glory bread ramp"} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 
0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. 
-min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. 
-max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. 
"data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol 
-priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. 
See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json deleted file mode 100644 index 8041ce39a5..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-02-03T14:50:54.649630107Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt", - "validator_address": "cosmosvaloper1ej0c20022zj9xr702a8ytjwmgx6ayy6jtf5r7c", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": 
"T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "f3e4bdc90fb01a1167636bb40362967569fa150f@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AxhLYE7FGQkOnVaFBpY9pxwBUUBr3D2IQVyguaU9Rb2w" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dx/A9Z4TjhlLnS3P5aYMSea2b0IKZP8X7p1zHoLHV+4QCEhKOZMwXY0dKyuCTuafpq+/9VJyOWLqvRnvx+M5ig==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": 
"0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json b/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json deleted file mode 100644 index c07fca93cd..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/gentx/gentx-f3e4bdc90fb01a1167636bb40362967569fa150f.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt","validator_address":"cosmosvaloper1ej0c20022zj9xr702a8ytjwmgx6ayy6jtf5r7c","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"f3e4bdc90fb01a1167636bb40362967569fa150f@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AxhLYE7FGQkOnVaFBpY9pxwBUUBr3D2IQVyguaU9Rb2w"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dx/A9Z4TjhlLnS3P5aYMSea2b0IKZP8X7p1zHoLHV+4QCEhKOZMwXY0dKyuCTuafpq+/9VJyOWLqvRnvx+M5ig=="]} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json deleted file mode 100644 index 75d41cbcd7..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"SFxLztE9i3reZ1yF6QUN1VbLtNSU1Tr6sMAQ8YMUVhpQSacMoM8IOrsCpZi28Svst8tPZQOmC+0qwYCdw+4HVA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index e0b9bb1ffa..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "3E23CAA4341C954786AE114941796B95B563EAFE", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"T4iSX7p9Zfb9ZJS9KmsjSj3ik7Anman99Z9kjdZiNpc=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "Y4CimyecevlH/DCP1f3Dg8t1T4PVsYrIWR9CuqMxYy5PiJJfun1l9v1klL0qayNKPeKTsCeZqf31n2SN1mI2lw==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json b/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json deleted file mode 100644 index 145e5db799..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos15f5wafvkhp9yczqw55y996qnc37w8wm3jk82ht","pubkey":"cosmospub1addwnpepq2vk4rpd39kuzklulxx9llkcwcr628m6qe54yyuy0q4vpy30ujlas7cze0u","mnemonic":"split mixture arrive polar point entire luggage analyst thank toy dove olive correct choice day dawn increase canal disease price dentist alcohol sense item"} diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address deleted file mode 100644 index 2805a8a34a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/a268eea596b84a4c080ea50852e813c47ce3bb71.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1Ni45ODY3NTkxMDcgLTA1MDAgRVNUIG09KzAuMTM4NDA3MDgyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSWFqZ25zVDZwZmZWT0ZaOSJ9.Au0cr2CiSPSn_L6qZ0JvOGph0M8733QvSmQv8AdCx3AF66JinLxobA.E2zOZfMPndHrF_iq.1NDhA42TdW6_gUPgsN2T4ycMY8-fd8oqZDrg-T_uz6Yt99z31zohe8vVLAMJJSg_Z9n82yv9wSk1_K8y_PqoRMKOI-nYq8ed5iiN63p72IczObndCd6IiEURlEE4I5CkuTeVjxuGNh_cqS2AFBjWnX0x5d27Jf__7YjkJG1OkaNX_8gVS0cRs7I0YTyYSs410vvK4HX1YEGNOWCT-SYX68alGuIMv7WecU1p3dT3hzChlw.tkE1W32vYIj2XMfyRb2A4Q \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address deleted file mode 100644 index 
ed76c2030a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/cc9f853dea50a4530fcf574e45c9db41b5d21352.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1NS44MjExMDg5NTkgLTA1MDAgRVNUIG09KzAuMTU4NjEzMDQ3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYjRzYUVOendvSkV3UVZtTSJ9.UZbI7pYbcENIDtx5Hsxh0Lci8Z_y1YwSdapRDeZE6BfKxUXlfAi2lA.ElMF8zQ1ot39SWZb.34UIgm11oh4WDlerswbv8vIJlTaicVP9PKH9WeeDzpppBLu01ge21YeEEBRm51-0EHBw8KSCT14CK20DbX_RpaQ-R3rB0A64orUhh-BdAouB_7URaAcJzl_kWHtrvDOvaOR-PIUA6PPJ3SNJEgodaYnXR8_zX21uLdcim6mEkTbhgYka80tpsMu0m8OP0jy8_IMAak9Obx80xPdMoK6EDKX94bH8IyJGMJ5idLjndURi9mvkDqS28nTU.8W6Eael_wwHzE1hfDjn4EA \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 6ab1f30d9f..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1Ni45NzUyMTQ0ODggLTA1MDAgRVNUIG09KzAuMTI2ODYyNDczIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiX2VtTGdadWNCQ3k0VmhWZSJ9._2XDBX2GLEQPSwHnx3KUphkyVRiOPqONkUNgSZRKiPfX_tKZduKR6A.-ICHk1c9gjagrUBD.zR6_o_XKCUxiYNh6HK01pANtaTHQV__9ASbPTGoBH3rpNfCGYUysD5iWEL2HmvsLIamMRWDOC_FvmS1iop5_GUM_ZiMx4Y8rJa9Q69S34xtRdizjfr6rdTSeNlDHZSHosnJdEd7qWhWT8eppS5z_xPl0ENoD5KHCvvFU1SqxjQ0f1ufWwWnb-qvqHJgAEX9lQXyhXLR_DN4HMhGo7WyZVqN_xYqPvD7mBzKwQXBLMvfK8DI4SrDp1KZLhoHxnYOKaSIDjz0ntvmfRi-ISSpqJvImSw71GLZ2JcS5akx9M_TOXxi16AVW--YhQcDJahT8ATOyxFqj-hJIVbU6oUU1vk-74w2I76Y.O_5Pgh2CGkrCaJuNPOlBtA \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index f266a39123..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MDo1NS44MDUxMDMwOCAtMDUwMCBFU1QgbT0rMC4xNDI2MDcxMzQiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJfbkZVNVBkRlZLTXhkOEtlIn0.FQeeUZLy5q5dYQK_7lIglUGB_bm3EsqLcUosojtkxbqf2QDGCPgmLw.eouNcsGAooGILwmP.izRK3eBdPfQlgz7cLUbekdvyzSewoObzazGgfFZu2ffqKZqapij2ckzmGqOzJ4XKQrA-8FGWPq2ZANTAoOsCExFaV9ZhPBBgZGi1YOgHyVjaQ8xDkNLswZ16m5Z9P80HZR3-C5CixiqxQMCNUy-T7rj24aWvYaFxRVQ4QoonRA0O0WcGUZZUCRRFnBj0SBE4-i_NoOpGyM69KmuQmI7hW_dqmDxJqaFB9GhxtQdWF1dA5fClsCAj-s9dbBJOUzdd30NsZ0bSYKdohUJe9HBOiZrEKT_1uO0cYQMEqps7DcUqoIky8XAAv7153SrU8BzCUicgWRYWim1QLRtP8B6YWY_bP-EzAE7wThv-2iLNCjbU-cRD.3oMuadqNJ7yxVez17l_Bxg \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json deleted file mode 100644 index 0b90e2606c..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1ej0c20022zj9xr702a8ytjwmgx6ayy6jwaqkjt","pubkey":"cosmospub1addwnpepqvvykczwc5vsjr5a26zsd93a5uwqz52qd0wrmzzptjstnffagk7mqqyjvjs","mnemonic":"front fix fury drip kind cluster hire hedgehog balcony gown oyster rug pond quality exact spread ship match kiwi gold beyond fame car ill"} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 
0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. 
-min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. 
-max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. 
"data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol 
-priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. 
See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. 
-# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 2c49ab669c..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-02-03T14:51:06.54169119Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3", - "validator_address": "cosmosvaloper1l5akq8gvv7c35l3cmgu3xvje6m7df7tf30nzhz", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": 
"GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b6f08e59e6071a9ddfb7f66d08723b1bef442f71@192.168.1.213:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AgQZuntbPUcfQZHy0NhGerdUa6dmFxcZO8rGkLPa3zrB" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "WXaiZujqnOXrUUEBxkofpTZlxrUV9Nfn90GwhJaQ/4MsJKx95Yhang0BNHy68GDLNiWzGjYS2Kz4c5V/lRcjGA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": 
"0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json b/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json deleted file mode 100644 index 82f090e98e..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/gentx/gentx-b6f08e59e6071a9ddfb7f66d08723b1bef442f71.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3","validator_address":"cosmosvaloper1l5akq8gvv7c35l3cmgu3xvje6m7df7tf30nzhz","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b6f08e59e6071a9ddfb7f66d08723b1bef442f71@192.168.1.213:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AgQZuntbPUcfQZHy0NhGerdUa6dmFxcZO8rGkLPa3zrB"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["WXaiZujqnOXrUUEBxkofpTZlxrUV9Nfn90GwhJaQ/4MsJKx95Yhang0BNHy68GDLNiWzGjYS2Kz4c5V/lRcjGA=="]} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 479ecaf48f..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"AQYU4jfG59xE9H1aL/u+uAgSRlzJM499W9lp6B9O5TDkkvxTFY45gBOVxKRd97cOM922/yJHYEhfRoHs3ooXQQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 5fb082a9f8..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "83241810ACDABC2880B48E29AF113AC5907A20DA", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"GuS1ygd8w+B00mMo7N/+PqNBH7jLbkJjGNo9eA/oW34=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "4Vuni8zdWWJQdT7iwXrpxuPFSlwX9s9P8TBBTCXRuxwa5LXKB3zD4HTSYyjs3/4+o0EfuMtuQmMY2j14D+hbfg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json b/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json deleted file mode 100644 index 2224cd7eec..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1vwxc6zytm7evqagh27keefldqtnpntrkhdqrgy","pubkey":"cosmospub1addwnpepqtwa3rh2rl90q2ctm9yvp565vg7mpkfcpvej5t3kxe3shk3lxsx7zugmyzf","mnemonic":"vintage protect level embrace pencil often casual couple allow snake bronze quality delay earn nothing approve embrace margin jelly private obvious color bomb float"} diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address deleted file mode 100644 index 8ca62ee00a..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/638d8d088bdfb2c0751757ad9ca7ed02e619ac76.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowOC45MDU0NDAzMjMgLTA1MDAgRVNUIG09KzAuMTYxMDgyMzc2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM2liRE0teEpreEIyTGNxcyJ9.rsJFZy_PrEDTPLPX2LNKgonmOv3kxFkRuTuDjLgZehSamY7JIZs69g.M1lDKbntKiMxA1RS.2cVYYt5cql1jk7e8SJ62bFOF6ARdV4GlPbi6Z3cc6ua-tveq46qDB7gzLGYWFkEPWl96-CyjbTIi75p5fn0moBiqLX9P_Y97mQPgXGfQjnbjaNPRk1zDREICdCpPvKdqjWD5gQJQBIjJzKurGen3l3tPRj3BrRtY5_Pv0stjSMRegNKErnZ6IE1fsxi_8uiuQDfdhFwjTm9bPLPomYRSSGOhPmahQzsEuv7Pw1ePba0hMA.MlBJiRSqxY7nuhkDai2rRQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address deleted file mode 100644 index 
458fed6584..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/fd3b601d0c67b11a7e38da39133259d6fcd4f969.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowNy43MTcxODgyNzcgLTA1MDAgRVNUIG09KzAuMTYyMzI1MTI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiX2lHWmtzNFAyZVlKUjlsViJ9.XyxB43Bi5WC5hitA_xVez_ZULcED4AwogGLxzE4CWQ_JEN3M4kQu6g.JEA6TzEZep_qpLUw.zHOpL5lR2hKKHspGA3zPL0H3s4B3Ru-8F0G7DWfwSbGuXSfu4AtjxOys0bbyB-DEBd9x-l4OpFyrG-ccwHNNKp5AAX99kVLbJW-c2hz19e0iaBrq83ycsQ0anvwy1yckaujDxrJ1hlG0ASEvcKQqWViq1u_ujuG0HX2N6qsCMtfSHR0Pi2-JryGlaKZKrXoKLz02Fu_h6jQNpIQ7oHHos0NTzA-KJxsUcqKHt5r1ScbPEKxr-2SZ4MTo.kz2rKiUHOihpTQYjKPvaOw \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 index fe235e09cd..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowOC44ODkyMzg0MDMgLTA1MDAgRVNUIG09KzAuMTQ0ODgwNDYzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTGhQYXZUdDRxMlMwVWw1ViJ9.C397HbHnLZQfC5K9cNM3NHWcoLZhiAElUxQQVH2nHOZCOVZAE2aARg.F6B2C7YoAIIVxZSi.0Gqp7ZcXWIDsGQL3K2n3DIZ4VwcRfSXRijIk1v8ZuKbH02H0EBAopKDIszA-IbYNwlllE1_58tuFde143yKfbQ3G04upv5KYBVr5VCpun2jBtO6DcUkzbyGgBQiIgJ54bnfrw7AaQrTzfgtequY0jidm8sOhMKYW68zyBDPBhcAxWcxmODvq6brYq2-xyvtHFJ5pMJKymmdvRyGugL1YkawLjJkhfkWgCz6lys9DZ9kQvvCJdx47xpufniSxm72Nia0bvitSC67zp6Vs1UDovisX7a_P9wFIsCCr3x9iBDrgjV3RhUjoWuVDC2kR731Bs1XbQMJBIF5ovWgwgIunaQb4muNhbmE.gvVp69e7BLcqCc594sV7jQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index a4abaf81cb..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMi0wMyAwOTo1MTowNy43MDEwMjA5OTYgLTA1MDAgRVNUIG09KzAuMTQ2MTU3ODI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiaTN6aU03SlFHWGI4RnlITyJ9.aFLyxaSlXM2028jnoET9m66Kv4OG0pNXf-daElLM9AJ3ONm7yMzWwA.gZGLlKKLxfDTcZ0x.HnlMnQ6zuqPtdF3YbbTrptmN9YgRrCh58QFCbDB5oEE4udj0hPZDhIGv7PFYGddSV04oEcgqQuo7h0x-UG119wLtfm_1oXvaDWIJY8vSfMbX_ZItOQahpfpSjE2-eU5ZqFKsVOoxV8o1vw5XqUHsvNRVWM9CoeM-bB3emHLKySyq-Pgzh26jp-HF27Ive8kEYg59rWFq62kWfaaAveBqVHJUccUlo9vtHLwAEd9doUaFj97zS26-8_grRfNgwLZjyHFFWbGlbfGLsn3yQNTFWNtTJ4VW_ZwvEuAxJ69Gy33evTJbYQawXB-PNrq907lIfzbFlPtQNmKq6Ou7M1ofnWmJJNfRXwUyF933mhJp-cxwjvWv.K3KPLo19G-up65pcbtCnHw \ No newline at end of file diff --git a/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json deleted file mode 100644 index ae03aa3240..0000000000 --- a/ci/chains/gaia/v4.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1l5akq8gvv7c35l3cmgu3xvje6m7df7tf5m8hm3","pubkey":"cosmospub1addwnpepqgzpnwnmtv75w86pj8edpkzx02m4g6a8vct3wxfmetrfpv76muavz507azh","mnemonic":"column pupil truly plastic trial profit arena wonder alien worry food tonight alien member merit kitchen actress inside blossom clump age resource repair street"} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json deleted file mode 100644 index df217df04b..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "a345b77cdfaa7499fb5d6514", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. 
-# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. 
-# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. 
-snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses 
github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with 
cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. 
-# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json deleted file mode 100644 index 4c200cdc3e..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-03-10T23:11:19.074673019Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v", - "validator_address": "cosmosvaloper1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0w7x7al", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": 
"iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "45cc9bede9a7d77955b8770b47267069f9d910b6@192.168.1.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A09doPsGZIeiIE4QdZxt28539imOelzvTf5wlQrcFB6V" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "D8Rb+8eIVmz222pe212HLryE/6h5Kg5aerEU77Mw09p4TSkOEKPuAca8WvjrboRcFDpDc9Qh9m37fuGwQmm/Aw==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": 
"0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json b/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json deleted file mode 100644 index 60d77586ea..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/gentx/gentx-45cc9bede9a7d77955b8770b47267069f9d910b6.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v","validator_address":"cosmosvaloper1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0w7x7al","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"45cc9bede9a7d77955b8770b47267069f9d910b6@192.168.1.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A09doPsGZIeiIE4QdZxt28539imOelzvTf5wlQrcFB6V"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["D8Rb+8eIVmz222pe212HLryE/6h5Kg5aerEU77Mw09p4TSkOEKPuAca8WvjrboRcFDpDc9Qh9m37fuGwQmm/Aw=="]} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json deleted file mode 100644 index 802b32756b..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"cL2y+gGxn5GpzI9gXcG4swoe0MGiNHEJFejbnWjXiSKerCGitavdUVLWBuug74FbAA6G/hpw+7y3Ng/EP1tGnQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 535087bf4f..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "028FA4039CA625CDCF67FA42CD1252A0ED1B58F3", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"iuhBmH0fkvkZDJBMxhbsVyws433RY+RBVfpaK7m/QgY=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "IbIVd94XuhcfqBr5KRZgZy3TiolDo0kuerw6PXMfDv6K6EGYfR+S+RkMkEzGFuxXLCzjfdFj5EFV+lorub9CBg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json b/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json deleted file mode 100644 index 74515d1149..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos19gk88alnakp2ccfls3vxshpclqqv5gs7ca3uf4","pubkey":"cosmospub1addwnpepq2f7zqadncegg3m5ke7dpa064p5xmltlg39pnplylztlmsxaa5e9zcwwcc9","mnemonic":"meat domain tube damp notice pioneer useful cherry develop harbor identify yard police distance quote volume number kind lamp word region lumber motor donate"} diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address deleted file mode 100644 index 7b4bbf9521..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/2a2c73f7f3ed82ac613f8458685c38f800ca221e.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMS40MzQ2NTcxMTUgLTA1MDAgRVNUIG09KzAuMTU5Nzk5Njc3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN1VaQkYwTUQtMF9RTG5XcCJ9.7ul__VTSeFv8NGFhFNgg0NeToZbM9_9mDbSeuf1mBDpcM4R51ind-g.f6U0SMd-AX-F3Vpr.fTPtpbW9Tw_fP6SO9_oW9MAUcTh8ce8Uhy5VScoEJmp0_XSFANCMX4GCUgjCmidZHWVZs3I2w059POgVGx2j3QETW8Ha3YEzq_v9NPV7Cp4wupKHWCuMVXMQDJxzepBXXvos8chliSHQJSJXg2kmrkU3gSKKrARmfphvDVR94SkoeFMICQK_3PkbIsAApdclkj6_16LVlCzQWZJ4c1B4-OIjTxDdRdtbQJOQkSI9p4PHOQ.uTjBcYGCjzEPzWkqEyPNdA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address deleted file mode 100644 index 
54ada7e01a..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/77f61459b63c38c20d493d9ef1a6e35889d3576f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMC4yNDY4ODM4NjMgLTA1MDAgRVNUIG09KzAuMTYxMjA4MDQxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicTY1a3dYN3ZRbmFTX2JtRCJ9.ZiY-spyh7NYc9Pd83APYLg3Gf24UCnQvexiM2HOjKxy54fpZceQggA.eC5y8lYbobmLehya.hdZnBJdY8Z6TD45gmlbsCBN3VhE003weVCk8_-cK0AhdjU6vz4mM8uIDE-HZ5RmCN6xjPxjQkUdNqdIyLQxv5AUo6T3dNAKK3K0j3wpRLIvNMzF4E1aGB0xBdW7DjEsFIRyrvHf0UCDPwrALLXUYRhgZgdgDP7dkxRGjBP9vXx2l7Y7vHOfPTerAENtSW-uj30ck4L5M-Ni8Ps9WHwuoFuQ7iR8CD-zXimuKdW7eusTdfL-mf7ZhT6F9.4EHrhYHsSGMEVnChYcGcWA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 9551a79564..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMS40MTg1ODA0NiAtMDUwMCBFU1QgbT0rMC4xNDM3MjI5OTUiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJoRkNwQmIwbTUwajNielhHIn0.C3PQTfSyFpuK7rbdEL6Z6zN2reOBRnDGHUV-jN_P9imE_1A933e7lA.9XKKOi6b1Nlw_90U.yoQd3cWpPpeYmX0cfpq_y1lPJ7gmLfo321FCYoD9keRhJFe9QVlxIQl7v_l2A5Uah92I55_Is1BxFJhlMEyHWaasdCHQ9OXzfO52Kwrm5v_IPyngJcCKk90yLa6oeoV12XmCBjOyZqIy7Ux10IOGxQoykznl68JLxb1aACyLNQ7ECOh87lUqLlcgu4LzMerlA6gY6oMW7FjxzV_kFLCCHHyF-fP4gy3vs6_lfozVHH0fOOWLQCDc8eWQcs3USltaLRmJDvHTqoZj6h3B8TLtJKqHc48jRdK9jBC3qgo9EZuLvBobef2M0XGgu8kiAv6WfkYsyEldilnfj7xkPn4BacAZXo3V1gg.kk405OOVQZDLVWoHPgaywA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 043b8d9379..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMToyMC4yMzAyNzQ2NTQgLTA1MDAgRVNUIG09KzAuMTQ0NTk4ODU4IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiMkNkZ28zekhfb3B3bk9RLSJ9.q6n61AXUzUb6qNDNRbytFs0pdd0psGHO6xxWPJa91KFHhCo6e1lZbA.ZK_aytbygZCmJicK.yCj0FFc9w1m5eVknK9t5RlD38yqRw0VbO2VGMVTLOp86QtriWoTeQ4xblUKv-5EneXhM6uTzrG_vgf8_xTQbfaad44hWm8EXKV0Fg0AZhdoAqMWlN3DbzIUh5JVdmtL3JywB4blJvM3ugRsYM-CDyDOoUeam03ci7gFu-VzGn510_dEff7y4MTTUlvgrNeF7emCKTt-lrneBoknc6CqdvqgfhGpcwhT5XkPpeZgHm22jn-pLOtQifvsvU4T_gCK2SnNNPaorDkeMA7gWjiEqvBB4JPZTsO8Ps7Ua46KPmLqBaFlq_jba1rJvQSLhU4jTOH6WSbTdNm0cCyQ8zzUOv9dQT8nJGvOiWJBwLFZNu-S4_Ktd.MfJdPHsR-lchlIXrr-9bBA \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json deleted file mode 100644 index 1116869c59..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1wlmpgkdk8suvyr2f8k00rfhrtzyax4m0t2jt3v","pubkey":"cosmospub1addwnpepqd84mg8mqejg0g3qfcg8t8rdm0880a3f3ea9em6dlecf2zkuzs0f287l3tw","mnemonic":"picnic busy spike vintage various below sight faculty kitten require share swamp member injury trouble mean desk scrap version unfair engine success ring still"} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. 
A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. 
-# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. 
-swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. 
-# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json deleted file mode 100644 index 56b21ad6d2..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "genesis_time": "2021-03-10T23:11:30.807005731Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - 
}, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "200000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz", - "validator_address": "cosmosvaloper1l7yp6p7gmhvcjwngh6vvxcw2s8l60hke88ag03", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": 
"ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b56974b6f467580c92b7a0fe752797eca18915d0@192.168.1.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A7t/fqE7mjd+KK1dCev7LtDxy3cQ2v6NCzRrVXUGxBR2" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "hsNHETTZp2u1Q9lhtR98bnupI+jYJx+zcqlJcNv7CIkIqtOOIWFSUy7WKGhvYPzSslHt691KG+VHph63JMjw/A==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "172800s" - }, - "voting_params": { - "voting_period": "172800s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": 
"0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json b/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json deleted file mode 100644 index afb3aaad30..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/gentx/gentx-b56974b6f467580c92b7a0fe752797eca18915d0.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz","validator_address":"cosmosvaloper1l7yp6p7gmhvcjwngh6vvxcw2s8l60hke88ag03","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b56974b6f467580c92b7a0fe752797eca18915d0@192.168.1.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A7t/fqE7mjd+KK1dCev7LtDxy3cQ2v6NCzRrVXUGxBR2"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["hsNHETTZp2u1Q9lhtR98bnupI+jYJx+zcqlJcNv7CIkIqtOOIWFSUy7WKGhvYPzSslHt691KG+VHph63JMjw/A=="]} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json deleted file mode 100644 index 095f1d5de5..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"WJ0+t+9V9WQZtXI0lGu9lMjTNmWn/KNv8oVvbxu3BnwwPzd52rKilEaJUh6B2tj4+dKQbcmiWAB9tZKwlR9vIw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 826fedc842..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "DB69AC7415E648FEE0FBBA6325B465C9BF722043", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"ECbqm7nyI8nJgB24gc9fdBb7v3a3OGSx6pfmMpy2XNA=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "YpdaGAiOAWB6OUvkV+1A24bPbP9UsEKqWIKnDvpdWr4QJuqbufIjycmAHbiBz190Fvu/drc4ZLHql+YynLZc0A==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json b/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json deleted file mode 100644 index 61ad598158..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/key_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1448z7w3j5gsht5fa099wzdqtz9hv7ryur05rm8","pubkey":"cosmospub1addwnpepqw98q342d8vad42pu8yv9u5nyekrrjzxcft4cu02qtwgvljck6mu75tz9j2","mnemonic":"trip double kite slam town swim train nice vivid torch soon artwork capital sweet struggle raise moral vault unique tape cloth elephant tenant lazy"} diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address deleted file mode 100644 index 2d31e6a69d..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ad4e2f3a32a22175d13d794ae1340b116ecf0c9c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMy4xNzA0ODQ5NzYgLTA1MDAgRVNUIG09KzAuMTYyMTY5MTY1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiV3lSNHhGcS1LX1FVS3VEViJ9.uJXyQ92NvMUNARblxv7LkNkCJr13S0RSQEde8TJgBkyPC-tyxjTSqA.2pM_o8dJOrMO65VY.IyhKLjlCLDCNivHsToX4hDd8MN_Gj_tyqk1Eoww_pqjY0l09sgFZZPbeJkSAXYkzqjn731Ex2EnoWz1xaI3MTahSFBeQzCp8pEzph1gnzoW7xj3cxsPloHOnnMubtUFi30WX_F0M0GrnsMeFIu8AGgg2WiRbFRLKKvglRwcOvImqvL_BGvjtOeyTpEAgNcpTFV-HG4dQ7KFpktD1xjrZGaNZnUkiKXyxopXjFb91iK6_gA.b5X94oQnSbvvABVmetKeUg \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address deleted file mode 100644 index d0cfc02049..0000000000 --- 
a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/ff881d07c8ddd9893a68be98c361ca81ffa7ded9.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMS45ODA3ODk4MTEgLTA1MDAgRVNUIG09KzAuMTYyMTcxNzQ1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM1FmLUxhZHhPSy1DQUhCUiJ9.UKlMcMJ92PPqca6Qp1PCqdIpRnmUQ0qS3azHrUWvQusjk0x2-meSdQ.RvQGnB2W_wd0vM2y.pP6wnn21-BCh4Nnvr3cfY7uAQ8fq8WTx82r_TuAi4c1v7Ht_bbqlUo00z3IASHw8pbaabjUG2G2EzwdQXRocdofEhlObmDSs1zt6vvq3_dNQcQicCwchzxXQg4YOtTDjGKXfStHquF7Mp3r_AJWC1P180TB304onvNqAvDQ4Cgcm2NvTcKaCsBrsyTG5FlUrx5iWGFcdcLCNmsJya0AZRLJPv9_AydM8mJjNyledAMOwx2J-xpNpv1l8.H3Yb2CX4QXxg62Dsz9bdGw \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info deleted file mode 100644 index a6cc1b40fd..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMy4xNTQyNzYyNjggLTA1MDAgRVNUIG09KzAuMTQ1OTYwNDU0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiY1JHRS1zSURoTXpQSTl5MCJ9.wnB46OdjIw8sjPlHz210M8KdMT3fV8IEDYs80XPT3hYuWVyujpVQxQ.F4UkwlZvSudsxCox._PTGfc7oXgB_t3oLzLZPZIPVhADRE2bPkF_vr2sgdx5Z_ZH_sIh5QN8-ai7d0nP_o9JIAmt9omdUSbbrCxRuBFcX9jjOoSGOVOetPWL3z3DvwpsJd2URXa5axUYWd_xpXL-qKUvDWSyAdLGNuzKvROezsfCeMHxUnVD4k1bxC8BnFjxasFxcbYYVeAEoMPAZvQTkl8tou0k8I48lZla3NoqADKxteVb9nlOR9ETMcEDvNmyJ9lJmtlFWWc2Z29Zn1vhw9AyLGMjf0sGDdRx6KrZZSVvHqPSEb_hqATAQRydhi0btRo5GRifJIKdIJc5o6HA2VmH010cUSZlHWUpecam9D9SjpRM.deJYBz0D8wmMX3KNsibaEg \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index cc3daec056..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wMy0xMCAxODoxMTozMS45NjQ1NzExNzQgLTA1MDAgRVNUIG09KzAuMTQ1OTUzMDg3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTnlnUVNnNGQ3dzRhUnN5eCJ9.kGy2rUdFlT6NNF60b2sBUIwQhM3AFwy4tQEcKHdeDBqvHcNNm5JgBA.7WHjfrN3sJuiY0Vx.NQGQRCEc9xy4m_XCjMLVyzLmgxU02kATCzE6i7l7DV554MmELFcNVvh-4PEhT95Gr2t3zZOOlePj3TQnQsQiGJWdaeJLhx8ki9KNHakH1-gX_GNkJH7SwMbs1CZ-sf6w9Q5AJHUU3OSLe5WyzLM1PEGB4ioje1W9eLkg7mxgVkGd9XtbXzZ4HBqigwFI6GyBgCXZREvFp4JHjJzqTwZ0DAEpzAz6rkjwk-0C_1snQdknLVD_cHyVgphrJx2d_2gDXXfu5py6SOOai2yakFeDfqn7qetS1OFl6JJGJlwjO7LeKUlftmG3NsirQz7p1_tXCJ2UbOk4UxzEB0apv4IEN7gooSjmiNGgFFBkguiQxBtgo7rt.W07CCcXAsSr25-1d2zM3iQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json deleted file mode 100644 index ab63235057..0000000000 --- a/ci/chains/gaia/v4.1.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1l7yp6p7gmhvcjwngh6vvxcw2s8l60hkeznfarz","pubkey":"cosmospub1addwnpepqwah7l4p8wdrwl3g44wsn6lm9mg0rjmhzrd0argtx3442agxcs28vay605h","mnemonic":"climb sight despair thank anxiety brown busy south fit buffalo follow bronze episode length few spirit auction salad donate elegant problem logic tiger ocean"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json deleted file mode 100644 index f7a8c71e83..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "af219e789fd5acf4bfdc46d0", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml b/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. 
-# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. 
-# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. 
-snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml b/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml deleted file mode 100644 index 4714078da1..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses 
github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with 
cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. 
-# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json b/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json deleted file mode 100644 index b855cf5be0..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "genesis_time": "2021-06-14T13:12:59.924746991Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2", - "validator_address": "cosmosvaloper1pggja8l6rcpknqwpgyjysk7a2sm9fv6rjugz3e", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "240c6c414bfb15dab6e170c7b1781de818736374@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AojvMkfmkHDkXDlQcQvl/55kHyREFMo6ML90610yb/yu" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "7Q0wEGWmQH9AwFRhcCJMNgnFrsho37/4EKhpafKWWOMvAGub2LfAt8iLuHck1mStJG4h3mO9eHUx5Kw2xpyFsA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json b/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json deleted file mode 100644 index 21c082511e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/gentx/gentx-240c6c414bfb15dab6e170c7b1781de818736374.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2","validator_address":"cosmosvaloper1pggja8l6rcpknqwpgyjysk7a2sm9fv6rjugz3e","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"240c6c414bfb15dab6e170c7b1781de818736374@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AojvMkfmkHDkXDlQcQvl/55kHyREFMo6ML90610yb/yu"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["7Q0wEGWmQH9AwFRhcCJMNgnFrsho37/4EKhpafKWWOMvAGub2LfAt8iLuHck1mStJG4h3mO9eHUx5Kw2xpyFsA=="]} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json b/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json deleted file mode 100644 index b60e32e105..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"I7b2zBJrrLurcrW3JZ2TkCsOpHsfe44Ql5T3ZCBTU5C28pjlytBjgkuIocu5QpqrSpUuK51NzI8NG+Bq++4PRQ=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 6cb3161da0..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "8D52C26DC85F80E344534DF3053969135B1DA272", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"IWE690GXTZ+i+l2cqDS4AO7U5YZEViGea7WNjcxH61I=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "07mTOftTScXOU8vtsUMSxwKXeKWOmjfXUj5YEse1EeohYTr3QZdNn6L6XZyoNLgA7tTlhkRWIZ5rtY2NzEfrUg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address deleted file mode 100644 index 807da328b1..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/0a112e9ffa1e036981c14124485bdd543654b343.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMC45OTg2MjcyOSAtMDQwMCBFRFQgbT0rMC4wNjQxMzE5MzgiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJPSVNaOTlHUUNkcVl6LVN0In0.CZMPhgSbRxrf1aA2NdW3IWxCc99W46ckA_ANwO9aUMmYT8bDupGlfA.rXtMktmJBPPqyv0l.GDbFv3ps--6uIZdI_jEG0Ig27i-GGe12arGeDBQ0_eoMw2rY-Mb8oaMamFJtIsc2eNGrCfWLx3QetD0sp423GGgKWzlG_hoNawgL_QRRy_rwwZEJz3cbfvHRPHQv7VY0BwMCElfInRmYypIu91eME_Ui5QEHwBDVGgdF0vyunaozo2yMOHtUDEjUmh-TzjpDSUG3RTefveKYK95kFlpzCB-k6M8j5LZSz0uD_5EPzdkaL4dMcGGOyzgI.yGMc9I2f50IikMt5YAG2Tw \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address deleted file mode 100644 index 911af8519e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/8c1145fd1c8fa0119e95466d9fa27320bfcc0c1e.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMi4wNzg0ODc0ODggLTA0MDAgRURUIG09KzAuMDY1MjI3ODA3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTEZZbk1zTFNSWVp5X1Y2XyJ9.eNZUQlok-yxmYnWAAUPnRvoHBFbj5cRsERmqbnFdqiAZNZ7g-b9iaQ.hZsRJ0mY3FFR0PmG.tuODM50B7-Zx3hGWIc84Ch6SUS05UieIDpAwRk85fBe5o6LQ-COy-f_wmrSYfnet9Onp23CnLVxUuJnbw4hiAZVFIRkLN5h_aOBlQ_Dg0edvFcSLSdDDgbHuCkPG2AuygmpR749FTBWqvSInNP-4JyJNrmrpUvq6mVitXdrLRU-1c8KWDwJVuw4BcyTHGNIyT9D1aaZZaI_BApZDXzJhRHTS5PigamexjzNB2KiBHtJiUQ.25_lUD1O7rjmqUeL-9_tLg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address deleted file mode 100644 index 508eb654ee..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/f5580fd9ee89a00b0907ecf985c4febfc1672d3d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMy4xNTg4MjA0MzQgLTA0MDAgRURUIG09KzAuMDY1NzEzNTkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNTNrdmxEYVJ0dmNsOF90UCJ9.9kn3dNjpXGDJ3XMxUyGUWMllP4QiKhIdrZCbG-OwjGjyRNlPwJ32fA.utuMYFv7LPDz6P1l.djEBvcAW3PgPGoWpCzGWjF14TlICbGQ6Nu3zzPMMMhhyP0Ed8MA_hF2jnC-UYrlkK03VME55c6Bt2XjJLt6gX-fKKMOoVdzyb-a4bHvywH3DwU-CtmlxwSZUxClpiTjriX13yUCdgS1HszRvIbpiWL6lZc8294PYbVJ9EzMhZkv3tSWWYGl97vuHB_1dP7cGNJvp1FOvokoYxf_pfHbh7lVHxd2CZieOfL-g-rZ9bqWFmw7-sC0.t3J9an5u-70LvsshXfFHeQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info deleted file mode 100644 index 91dc1fba44..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMi4wNzEzMDM1OTUgLTA0MDAgRURUIG09KzAuMDU4MDQzODk0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiN0JpWkVEdWNtX3B3cXBUViJ9.TGKX2_vYpVCPcHWwWcgBbx3OweraZ_PIWjqmOE7qdExo8QslTTEWeg.8aZQFoUOMq-OhVYj.yol_qfmJdXXTFqvrw7LzjCR_0Bm3tLhJ6N68p5xHWHsQ_GAFpGGVLqPdKN5_3dDK_ObhlDMqEBsmqS4HIYT1vxmsKd5YmdWGXY823E_cdWQ-H9_p6BBdAsxak0ckWEeVuEZAT4963viFTSMnuylgMfZwzSXybipSoI86-PuqqVweSpsG_IRln7mUOU5EruMfSvkstzLsz5gBEJ1GnUXNUHiqXfZiki9eQ4JiUYeZLEPFkB6rsYt47xUMdxT__Bbi5fh-OvlwlRyQWv8dLHgaR-QHGN1qiYh_ZPZ-HRnJ1tgirXWN5pQBK4BU6KIb3DoqbVt4ZYRSHocVWaS0cX0vkfcNN9QBERQ.wQ7RopDwE-thWkBhKyGoKg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info deleted file mode 100644 index 5603cdb824..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMy4xNTE5NDA4MTUgLTA0MDAgRURUIG09KzAuMDU4ODMzOTU4IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQkpVWThIdS1JSi10ZFVkeSJ9.CzGLBhnREbPwATFvjx5OsQ3xcvw6RU3BMJv10aao28PdpTspHzcKxQ.c1JJAuOzl_TdiaGg.ZsG0fQmAoF0j_WAlMZbVB3rhc2scGkYKxewcR8wyMZ792vZtdys-whsqLhv8NF0jc0HyIiS4gk_1E-QCatZl-cMWjjRZaCtk599aYA5CRKOEFAfhLond0nGVEjM6HbL5ChgAulyEsq74DQnFoyrO6uZN0usI8G-9ghPFehRAB5Fen5crxROWO-Dr3Rs_-DLLCB1D_jLeWGSR6V1p4pFH0kYGoJ504FuarV_hPqQ4ftwRPFSDVDtiSun0Lec9TLd1a0VzaAueINpZl8ueVL0qxZY2BWsedU4QZjYDLxsxWq2aQY9VSmm3rJfpBLyMLfHr-HaWedfeqIrvnusKIGKgDdpxI0lCU0jy.V7H5m657PtOwyuLaoJOycw \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index 0589f1a12e..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzowMC45OTE2ODEyMjggLTA0MDAgRURUIG09KzAuMDU3MTg1ODc3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiWFRYNkVfdEJ1MUhXbXhzUCJ9.PphVXieI66NRMofk3D5FuOz3KwyZVlBdEBHiM-BWL5vf09ZgZOi_Zg.BqzzLuv2WbgZ3LbV.gASPxItpiGSMFeBdFyoVU_f4y-gflUKWmRWh2wEAnyI6J0wYPv9QixO4l-WEFPbWyebv0rBeZv8vz80xo7GH7W67CI-A8zpAgLaH2sULflU9Aqs84-MGQfdogFeoz46TsHJHEbN9Qsxe05be2eF-RWzJTeeVdJYSuo133IHJCc3EJesgYpn0Ve1_WjxYL4egHLBMaH9DJ5clNl8D01D8R0Vd_bjyiNqkXbSkcYTKOi3wtgyQ_SxjBkg981yANpLqjzEnKy4MuiWqw09BYOkHZC3xHBzpACHrm4bWIF4DzMHMsA7I-LI1finIf61K6dFGjywJ31FfhUE8-l6iTXFsQ4yf8fnur_Xyh3PUxxp4IKkBtJ-o.w46r1h6ErVI8ut26RUuPKQ \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json deleted file mode 100644 index 1cec6d4120..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos174vqlk0w3xsqkzg8anuct387hlqkwtfahqu9j8","pubkey":"cosmospub1addwnpepqvs7pf9n7gexuyw424aptzypnj5rtjt70hsdrwf8gejd7gtpswgm793a7zs","mnemonic":"engage verb play circle divide large identify ceiling gentle myself tortoise decrease matrix leave captain want deposit credit gossip warm denial copy food derive"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json deleted file mode 100644 index 72edb9690c..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos13sg5tlgu37spr854gekelgnnyzlucrq740y9us","pubkey":"cosmospub1addwnpepqvcrrazdvt6knarm90hr93c6a685hp64cvvx6t75r940n5uxw9nqxp5fvt6","mnemonic":"isolate inmate used fitness smart delay tower famous hotel lift blame because uphold record stairs million acid taste kit together genre win refuse anger"} diff --git a/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json b/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json deleted file mode 100644 
index bcf04513df..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1pggja8l6rcpknqwpgyjysk7a2sm9fv6rhguha2","pubkey":"cosmospub1addwnpepq2yw7vj8u6g8pezu89g8zzl9l70xg8eygs2v5w3sha6wkhfjdl72u89slhs","mnemonic":"return crystal shoe ceiling elite pen include hurt original boss garment code lawsuit auction people swarm horn shoe zoo stereo crop sleep caution struggle"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml b/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. 
-# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. 
-enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml b/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml deleted file mode 100644 index c32f66cf63..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/config.toml +++ /dev/null @@ -1,393 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator 
-priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. 
not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json b/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json deleted file mode 100644 index 07644cdfeb..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "genesis_time": "2021-06-14T13:13:13.056631194Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv", - "validator_address": "cosmosvaloper1w7wq24hmvel4946ekaql0v56n8rlpuv9kywd8l", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "69dd5b43951124b2e6e2d744328041a907f3968c@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "Aqfj+Tm7Sin/Uxw4PObXXqN8OpYJH2O92T+IOLSj77Th" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "5mu7NcH4HWs9Nx7tAqLMjjrAG/EBOIHwByB/x4CFi6ZPoHo7YnWwDMKWrOK6z5Adays/rr9l25/p3lVzZ2mEvA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json b/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json deleted file mode 100644 index 7b398f46d4..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/gentx/gentx-69dd5b43951124b2e6e2d744328041a907f3968c.json +++ /dev/null @@ -1 +0,0 @@ 
-{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv","validator_address":"cosmosvaloper1w7wq24hmvel4946ekaql0v56n8rlpuv9kywd8l","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"69dd5b43951124b2e6e2d744328041a907f3968c@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"Aqfj+Tm7Sin/Uxw4PObXXqN8OpYJH2O92T+IOLSj77Th"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["5mu7NcH4HWs9Nx7tAqLMjjrAG/EBOIHwByB/x4CFi6ZPoHo7YnWwDMKWrOK6z5Adays/rr9l25/p3lVzZ2mEvA=="]} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json b/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json deleted file mode 100644 index 1d542369f4..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"YJV/qvkkALKFIQeZCJMDVGXxb6MmHpUPFjWkhhSD5V9jaJsfupWnsJH+PvOGvlHJGvjKX1plnB9N2l+T655b4g=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 786b60801a..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "70FF5C5D426D127BEC43AA1AB3FCDFE3862E4E4D", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": 
"oUdI91TXlwS/qQXrrc/aLb9u/RESrYAhKKJY9iFyZ4U=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "muN9d4dS7CR/WEXEsNc85JbqgEyC4RLbNssH3v+IF2mhR0j3VNeXBL+pBeutz9otv279ERKtgCEoolj2IXJnhQ==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address deleted file mode 100644 index 82d1370527..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/4b137452f253ce8ee2ced8317a8b6e5a0aaae76a.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNS4yMDkwNDc2OTQgLTA0MDAgRURUIG09KzAuMDY1MDcyOTE1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibmVCandtSHFyQlF0Z0w2XyJ9.BTl2Woy2rvSdMNL-ADRy9BtSPCNnRM2J4NKXx44NSW5YjJCl79hOgA.O6V5W8isgg82NWZe.rNXOtM3r3Z-nYKJkY6j8tE5MDCKeiSUIIhfFhjrmzy5u7KdjGQaXAO4Hb9aYqXnh5GiymD_SN3xSGFx1KElCFY8jm7FMdluvsGFQ5UIgP90c6wWqBubZ4iNboRrQ3qn6B7tcDwe2UBGFdb6q9rub9sf3AqOUDsGPMe26613oQbk7lRUPYGfbVYk532JCCFojIDftg9F3XlhPA3r9MRZW7z9jD1n4Iv-PmBvXKJTa5CZKEA.JB9sZ58QQODQHkd2vo_I4w \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address deleted file mode 100644 index 84691d39e9..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/779c0556fb667f52d759b741f7b29a99c7f0f185.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNC4xMzAzNDcxNDIgLTA0MDAgRURUIG09KzAuMDY2Mjc3NDcwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQVZSVGVOanVmYUFjUW5XVCJ9.Kg5WAienYLoK6Zqf1lD7EY0b03IIpC0R3a0YClewgyldQcbp_ISvYw.gw0rXmdDXySw1jip.OLke5XiNiCpKqtQLC2iAr_4Id4iusXIH_dzHGoirlw22nLt4OMsPbmVQeRMcNDrnK5VSlJXXFPvizqqlgJutwz4yFMTifJ4azGUzXUnYNaABe8r8Y2UfRQMLM5x4ySFS1Cb55C6eCF7DLVd_9Vv15VOeEEDkACZ-kzGOwbI2JkYpXqveklAf1PLICtIDuafYyW9Rtr59d14BTQDKlJGlQcA8bCuvQxewh_JDxvCOfP7oEUlzU1fNl517.d-yhzyHzDVy3jyaMqcaw7A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address deleted file mode 100644 index 33b21e2606..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/a8319df11aceb1a5cdc3aab2e4cfc9bcb8f7838c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNi4yOTAxMTQwMjEgLTA0MDAgRURUIG09KzAuMDY2ODQzNzY0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNWJMZWYwMkJLWXFiRDl0NyJ9.4EVMebPoQfn0yGMEFR9u-PTdO2UKdoFfY7vSwfILVDqiE2VHeezWRQ.KA3J7bJHbTxI9ASJ.0dFNohEbV4PD2tlow_N5knExrP3DHzcYkZ_g1LB-RIwXsZSD0Ywhh3YxgZF662YvGYYvaZ4IrFmVFJ149eRdI80i35m7uAoQfJdQdZlhGk-EwlElOUrO8ZFAQikvFd7u0z6hGSXKz1kpe2ATSvH1ExM23Py2OxursQ1r8ZF7ySCC1l3yK1wiT8Xhd2VWYX0svExcQ2LOTafewu8OmC49w3J8R2h-jMJLC_gnSn-7VYUB7d5zvkI.wnIdgljYgH8fQOBu3WCf_A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info deleted file mode 100644 index ef17944c9b..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNS4yMDIxMTM2ODMgLTA0MDAgRURUIG09KzAuMDU4MTM4OTAzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiRWNGRFZaU1N4UEF3Y01JcSJ9.UsJ_17tC0mifeG3WCHDQMTF0R4pAgEUavyaUEyJpvh8llc0LepVMMg.t_Kpzpq-jsb-rGak.w1B31ctpUT04gDoqed4b5cp3Z7gR5JWqTfxFBvqJgS6Yi9vhjdkguvKp9L2MfefwpK6EYbUC3i0VpPLOUY51RfciA4Ap58WeQSYBDLlF6RG_lbKCmv6_FEJxR742J7CCYqKZhU7slGmYXZPORZWsv6Cp6Ba7aJatlTG1sqiU6394mdFHGQkNXAOTJTqSSA99ajaWm-0Qeazp1RZRY1K0mdjxd-JkT8MRiNPpLeh0DRnIGJ5n94z7afNTFZ2Fb2U2QbaN7LujJAjw1JHA7Nw5a0PQjUH02Wp5femiG8i5e41ME5oGvU-YuX801_-VafXzh0uzv6RfD6tq1i9dRJ-beGVQFKxLWL4.mcR8HWvCYlRnLLYW0wM2lg \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info deleted file mode 100644 index 59b1e80368..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNi4yODMwODg5NzYgLTA0MDAgRURUIG09KzAuMDU5ODE4NzE1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiRGNkNjNHWGZNLVktd1ppbiJ9.bieeQsp_9NDEx6PL5z15KR7HAm53spmwmylcayAcZyFXjHNqe1r_OQ.BnYcBQhGjPBav1MU.mVTHXLCelV1YPfZYm9848FR5STbC8Xm1kQjPgOWZ78aAa1VGbD6qsvlnTNhi7t1MIzPnMAy-O9sbsrtNFimuGe8PpuGD0pVeIwe_rE7w335lCFJ-y_GFeSRNBparwPFHpy08DEB8asozIIj_O3cVjcW1tUIcqLl-5XoePhKZZkchXUOYvDEquCEe3ZKj8PW-gFyEtjn4vK5JFRgj-LmA3T-bvHg4V7jPkdlITuecqwuHh_OxUX19jHtK6vm1YHA9V0FLyEMoX4l0G728UxxbS1xPB48oQNty4cIgLRmxnztCU52bSpEDq8W5VfcU5hNQpk0cL2lh8K7FyjStc79svZTSCIY7dINW.6SE8M1RrjHBghHh3iUDq0g \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index 0e0d63b85a..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wNi0xNCAwOToxMzoxNC4xMjMzMTE4NyAtMDQwMCBFRFQgbT0rMC4wNTkyNDIyMDYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJOcmE2dTRqV0NWMVlLbnd4In0.JlwFC_IeZIvb554blz3jrw8-k1CiXyHh7cR71u9M47hpylHmh3mZFQ.dwgvcK0kSJu4TkYJ.TSxVfxFpGvPV4fPx-l7agEGjQ_dxsoRffPJG0YoAdKG8WA0qTKofGWoVgg5jyMuBfOFUMVAPXWhnP9Gr3pCfa4SldrEZ2OoXYYtB47R6XBpZBfAXsiyioj4580vfrmTFQfPUwocBYZFja9S8G2kmP_zpP3tv3mOs5q_16fQ5UxK-kI4_i69NzL_F32iYgo85tUAjkrTGR55xBP9uW3Ev5t7AoKdozR9zjSm8MBgAX5XsuIFGCue74NqSZiynSA3prDNDBJ2ATo4ccNDUmz12RwlkF-D7bIlNzKTCFvMqLrqgHqDKmCnrhd_YQhrZwjlLBQLzZAbMbLuaSS4cBjb_mTV406dbYBuygVR3iOxPAJhYjhwu.UjRwllvnVNPFbMDvsNno4A \ No newline at end of file diff --git a/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json deleted file mode 100644 index b175597f92..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos14qcemug6e6c6tnwr42ewfn7fhju00quv5uk6z3","pubkey":"cosmospub1addwnpepqdg569jjx96hyx46053u3ks6uk8lvnkh9xnqkdyevhxvqgfxuh2p52aw30k","mnemonic":"miracle index era tobacco birth submit powder echo bubble mandate chuckle knee jewel hurry session narrow believe cake boring cancel medal announce mad bronze"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json deleted file mode 100644 index 7edbb11221..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1fvfhg5hj208gackwmqch4zmwtg924em2xg55r2","pubkey":"cosmospub1addwnpepqfakz5yae5sawd6muhrczgr6mvdd0hqsdhc07c2ua70vxl4xcxwssw9dvya","mnemonic":"satoshi shield jungle victory danger amateur design push leisure current couch capable moral october glide design erase hen divorce venue devote fresh early exchange"} diff --git a/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json b/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json deleted file mode 
100644 index 51cedcf7b2..0000000000 --- a/ci/chains/gaia/v4.2.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1w7wq24hmvel4946ekaql0v56n8rlpuv9ns6ctv","pubkey":"cosmospub1addwnpepq2n787fehd9znl6nrsureekht63hcw5kpy0k80we87yr3d9ra76wz05qrfn","mnemonic":"dentist display eagle twin apology joke alpha timber bachelor quantum old enough extend average caught actual distance trouble helmet real raw illegal time slim"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json b/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json deleted file mode 100644 index cce9f7f27c..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "d551bab9ec950b3c71733c80", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. 
-# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). 
-rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml b/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml deleted file mode 100644 index 071a9c4199..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/config.toml +++ /dev/null @@ -1,400 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator 
-priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json b/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json deleted file mode 100644 index 0fd111e818..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-08-06T15:55:09.233573359Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9", - "validator_address": "cosmosvaloper1m9h2rkke0h2qw5wpmr7d2a7nanxg8prakzcg0k", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "0a67e76f6743997bde26b522ef43e886b1baa95d@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A8om4wMSv7tTzlhjxv9wF/Pc4SZTZh1tb6kqRF4OOBJp" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dbccwhF7NQmGAHDr4+5JiVlNL/KqevVYSFCIGlSqYWZ9QOSxSJ+J40pzGWsaAyeVpo91xnQkl8CpuTofBzfXkQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - 
"send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json b/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json deleted file mode 100644 index 36cc8ac3d0..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/gentx/gentx-0a67e76f6743997bde26b522ef43e886b1baa95d.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9","validator_address":"cosmosvaloper1m9h2rkke0h2qw5wpmr7d2a7nanxg8prakzcg0k","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"0a67e76f6743997bde26b522ef43e886b1baa95d@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A8om4wMSv7tTzlhjxv9wF/Pc4SZTZh1tb6kqRF4OOBJp"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dbccwhF7NQmGAHDr4+5JiVlNL/KqevVYSFCIGlSqYWZ9QOSxSJ+J40pzGWsaAyeVpo91xnQkl8CpuTofBzfXkQ=="]} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json b/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json deleted file mode 100644 index 1ad4894060..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ 
-{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"XSr6UzSuk0yJGLPr8HNJmOvR0Ll4lXd0Q1f3JwCTsarMfTCiPkwdB7XuIaUYZ+EqPMQlGswfcX1MsmQnwT7tiA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 5acf485cbc..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "3CE3FE24E325D665404E83E08F52B6F690F53696", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "UWzLnjUlTmMPHJISY+GWB5I4wJKRjMGzNf/o0rB50vo=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "aHKU0D6vWVoOY/yGbG99aTwunQw/OLIWSXq+RnmLihRRbMueNSVOYw8ckhJj4ZYHkjjAkpGMwbM1/+jSsHnS+g==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address deleted file mode 100644 index 0ee685e76d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/3dccd2a276b243a6f235f16430bb45828989382d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMS40MDg4OTA0OTQgLTA0MDAgRURUIG09KzAuMDcyNDk0NTU0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVDJ0N0FLQlU3M1dFbU9vVSJ9.c_tFqk0Us6om3ARhb75Nc6Sbfr3pKIoAMN9iFbMTnxZClTMEjWQlrw.p3d8hSE0wlkjQJFq.TVz3yHFFHaaYbe2Tvxm2ZjFXHZchdMSNmOwqQpOx9kEA7E-lsDJtMTVZ67CHca6hONRQNN1qNGwmnc_qGt4EsFFkLSs8FoP4CbhZbpeiFP8hCGifaJeB3V-7rB1QqaCHXv6bWjo22J8R4Ab2FYSsmIzkVDsbJ0bwF6X1zvy-HK31DaX-7pXiZ_ZpHPLPulbNkuQOuw1Ko60pn7gddsd2KtVb8XGdfF2AwPbwm1JwJat1dA.6gvhCUm2fYsEaC83zEaArw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address deleted file mode 100644 index 
10aca78a47..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/4da888907bd0f30dab598252aea96a3ae97b97ee.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMi40OTEwMDU5NTYgLTA0MDAgRURUIG09KzAuMDY4MTU1MzM3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiV1FRdjdLTVJZTlh4NF9kZyJ9.sYBSSj4WLgksCMvTmINlA29D5xOghK-f2MAW1jFdVdIRhjKJ8pvFVw.zVwXJihE_WugBihe.ZYweIxXQ4BfXomdUiYjrNsmBn4N1J2SqHeDTC21Od6oy-tjq5xOMxKSDCqbPl9828txXEB9eCOzCkDMQkcEz7dYWGjQcBkGsln9JefEK_xchdSmeK3pfy2hVpQvqtt0OO8EsDlcIsm6gGloTq7ACJ9vJCtTiP2H0gTdNTqmX5ERRvy6KCVypZSCvPg7znXCTeW7vG8x7be9VA33ZT6KJIuphk91EamFFYOqJPQ9konae3u9OKP4.9gG1lM3T0U-TEVvH1NHF6A \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address deleted file mode 100644 index d97809d0a3..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/d96ea1dad97dd40751c1d8fcd577d3eccc83847d.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMC4zMTkyMjcyMzYgLTA0MDAgRURUIG09KzAuMDc2Mjg1MTUwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZnZiVzhmdERKcTJWdEdseiJ9.5XAKEXhLRZMMdbEJtqUUTcXqwkxICMQUfYnKdzyUmO6oItkgvgSAdw._w8dTWEfuIPQW82G._kamePXu2Pkx3reyUjSK1Abw4gn199CXbP8YHU1Zur5cu4P9tVIen3-tu0VJ9ou8sA-QUVdvcG3_kVxIu1mvcUdgQOGJidA6dndOuPevkFDWAvvtnUQaTKBzdiQaM3e7arGMgjXvFJlDM0IrCjmBBHiBCG7_kx-VLF-FC4D9OvypINRk-d-HVCpAOHfyrdMj08EcpRvpFQu0Q7WcEaZsl8Wl0u-TdQslkVZupPXSNfT79qxg1LwLvKzY.UA2-tzWmzhuGS83LLNFTJQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info deleted file mode 100644 index 6378434dba..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMS40MDI1NjU5NCAtMDQwMCBFRFQgbT0rMC4wNjYxNjk5OTYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJFam1uQmRUT3VHOVJFdGRuIn0.5BCtBSgTthznCJgVzxwvR6LV8-AsVAfkKJgaR3MahCuhcKjgM6H7ig.qO-A9NhUkrc3OcZM.lHVVATkQuFo34HYnmBu5r5uOeCt9FfYpArvr4SEwaltfzucjVPkunpNTG6VEuVkM-QNo86pwTi-nx3K-QydzkCpcjYM8vn-R-HuROwKp2yDgRdNr-QK9XLoYJqL0rrDabpwxJnyghuth0D6iJOlTkZyGn6Gkl7GkF3ewI7ny9qKwdTwv5AkbNI4UmN1cEDikFUgJ-EdtvjZB6iy3DsBuacXQspNZLw4IsVNS6G5y3ODBomdt4VrBlsYmfKVO_UFlmnuAhLHUbRw6pCibQgWhWll1IoC0zx491fqoNX049gFfJN2uDcCX7vy5svG2__4omQvBdCr5QtRwsvCFZPChNf0vuLmNLhg.di2QoHAwspqXNXe0Kwzm1Q \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info deleted file mode 100644 index fde543a535..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMi40ODQ2OTQ0NTggLTA0MDAgRURUIG09KzAuMDYxODQzODI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiTE9BMXRvanItQ0lsZHZjdSJ9.QvxZZ5KouVuuw39bKORfHDJRGSep8hdZQyuP-Xuvxu2pmfYYQ_1YYQ.W7y3-VIQfHW6SsWs.WlSMpDVl0Ej8pVq2HpoxiyEVZSdWygRWFHSM-zteFePNLjYUymhtg3jGx8s3jX5d6NwtX0PxmPVYYClLNMKWDHJH9GC5AfZgUCoCPeya0VkmOc9V9u8hBVBMDisnDCkE5IRlQzi5bd_hf-AJH_8vGNHvqsI9mtoeLF-BNAFTma_MUUd5wJNSxZYL3rsnhPmzBvNklAf3noCKQmmnIpNenMP23BwMSyki6y3pbYyJrERuo7noqHJ749vL2QG7AuogM70FUfiAABJXkL3EVZX7lOXJLxx6GCOlWMTLrAQE9bzxbCSYDISlmEfQgjhRgKPzGRqb8um98nXN7aa0eYGWL5OEiN7RBPTd.Tq-m5jicmGvDOuUNI2B_IQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info deleted file mode 100644 index 3f1a7adf11..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToxMC4zMTAwNzkyNTEgLTA0MDAgRURUIG09KzAuMDY3MTM3MTYyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNXZDekQ5cFBPbHQ3T3E1ciJ9.LQcsbodiuLHFDBxBMqGqec2nsjt3GFMeV4ePzX4YyGJz7bUyoCw3Mw.m4YUYOicLq53eixI.dPpXWczIiJnumz44yxHMkaU1-tYYjPsN9Sn_9PC_c8T3mDXPWCLWdaywYif8BP1jpQ__tF8iHh6HiOO9r6yp3mH6SGodkj2ey3FwBQwBwQ0L7N1sx6D4ASCaBaAcsjsc2aHew427LyDJhpMoW6luf4XzzzJsCRN0QXBWtpdffE23JAkahgIgrHARAKxYyqRkqRnesItEjJZXwjobm_ENEcyaUPdg7N5Ys3VDEp55FySa7shyBABprk4BdSu8N9ivaz6nk0sjvIzEDuINMCg9JdneErkIyr33mebIhAzaK3Aw6i0Umg1BTYvruai2eMp9ryh8nryMVrVDhBnf8DHaEkfiKUDPrP0rvzgAyjpWAleKrrb1.w1S63MsoNXdNVFgRuo0DCw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json deleted file mode 100644 index f14517d186..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1fk5g3yrm6resm26esff2a2t28t5hh9lwv65hdr","pubkey":"cosmospub1addwnpepqgssups8ywx8rp2h85xt0y7cfunpeadzwh9r5ss9w6awask3q257zld37vn","mnemonic":"dynamic life add hill bundle hold tone square damage library wrist news used text circle imitate walnut control curious cactus canal employ load half"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json deleted file mode 100644 index bcba356514..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos18hxd9gnkkfp6du3479jrpw69s2ycjwpd5kwm3m","pubkey":"cosmospub1addwnpepqd4gesy8r0wyvay4a2m3x34xlhnds853tnhal0mwnzxdmlls3lukk4gwgjs","mnemonic":"regret area gas slab game feel corn picture click match myself flip fragile shuffle field toddler rack cycle shield blouse cup prefer depart picnic"} diff --git a/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json b/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json deleted file mode 100644 index 
f816aa20ac..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1m9h2rkke0h2qw5wpmr7d2a7nanxg8prankvar9","pubkey":"cosmospub1addwnpepq09zdccrz2lmk57wtp3udlmszleaecfx2dnp6mt04y4yghsw8qfxju3rfl3","mnemonic":"robust nerve resist tip spoil grid poem invest unfold wrong helmet change poverty network popular strategy vague consider valid shrug salad health flush staff"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. 
-# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. 
-enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml b/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml deleted file mode 100644 index bef1cae571..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/config.toml +++ /dev/null @@ -1,400 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. 
-# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
-external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json b/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json deleted file mode 100644 index 5857a4c3fa..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-08-06T15:55:22.411963459Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3", - "validator_address": "cosmosvaloper1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzuz5mwz", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "1feb1ad566fa381f6e6b4afd639559e0505fd9d0@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "ArYgz5c42qZWEE37Ltf/g1ikRbRjlED+9u/9uPzzeWp+" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "licdn52xp1gR8oNcVzqZAWwDaOsdGK6A/+B9bkw8K/hakKmHiTe9wRXnOD3m532E5SYyPkK93vukLEhPbtx+wg==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - 
"send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json b/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json deleted file mode 100644 index 06efed20fc..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/gentx/gentx-1feb1ad566fa381f6e6b4afd639559e0505fd9d0.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3","validator_address":"cosmosvaloper1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzuz5mwz","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"1feb1ad566fa381f6e6b4afd639559e0505fd9d0@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"ArYgz5c42qZWEE37Ltf/g1ikRbRjlED+9u/9uPzzeWp+"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["licdn52xp1gR8oNcVzqZAWwDaOsdGK6A/+B9bkw8K/hakKmHiTe9wRXnOD3m532E5SYyPkK93vukLEhPbtx+wg=="]} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json b/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json deleted file mode 100644 index 3405e42380..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ 
-{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"gPIctCwq106NQ4vqEEFczd1lw//4MoNXqV4bPHEZKIepCHj5EMHC8YlvENm0Ge0RPexjMxgANTrbfU2Lb+tmgw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json deleted file mode 100644 index ecf31ea63d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "C2BD2207A3FF8B7BBBCF3E1F3554FD4CCBF5B9AD", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "uEJtNPYlKV1WAsKJV+ntucge/3FNb23OT74fNR4SB4A=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "8AatfvD3y+/RBRVCjA+OsegXGdiYg2afP4D2NmqYCJu4Qm009iUpXVYCwolX6e25yB7/cU1vbc5Pvh81HhIHgA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address deleted file mode 100644 index 1113f7071b..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/340bb9c429320d582a78e3c9997dedb9fe307082.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyMy40OTE5OTA3MyAtMDQwMCBFRFQgbT0rMC4wNjk3MzYxNTkiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJRSkhjMnBBR214bnY2aWkyIn0.4zfntRhv4PSLNCRq3eGlC3mKHhIsSveK1tKoxtDZINfuPicEOyq1og.9IeSNTJ5u3lMmiqS.4ehEfNzu0HwJyquVTY10QMOXjZnZTHvtuNH7jk_KqNNXqatsNg1rppABQUr4HjTFymGCOb0PzFWzLz8_4Fv5JwqYoU7j04G5uvvLALi6OVaguuEVs0B0qaD32_J8OTV9Bp8zbYr3dJHwPM6n7ntH5zIvQgC6kVaZUipglJAQjkZN2h0u2WHQ1Z0GPIeeE00dX52jJtgY0zhZdA_p1ZYAO_AJJ_fQQMfHkV10rnbErYHPFCqbawhP6ZIV.0NWwcw1QgdcihcSS7XyOdg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address deleted file mode 100644 index 
a3ed0bc21e..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/408f79a6bfd105eb477b2433c8459a2aea93c34f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNC41NjkyNTA1OTUgLTA0MDAgRURUIG09KzAuMDYzNjAyNDM2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibTZ5UGlIMS1zOVd1WmF2QSJ9.qRrycHfHVdBkFepPKyfZH-7VpIjVqJxcHiquC-heLERJqKOfwZKD0A.ESc_YX4SF-KGfXX0.81lArzJU37dL_0U6QX2aU4dZIOcVJqdu6pUscTwIWO12lbYMZnEt7QwVVxGFEpCb81enwmSOFyZI03mWloeaiVDuCPsOWjY_H-ivvGV1EooeaWynnyJGEpO2fucneP11szaJO2j2Pen9x9jgQdvn2Xuvr3gi4O6jMyiHnV8Ibmx7x80cwMpoQF9uPtodIG9aP7N1xWHV6_l_O-emv9qfnuTuLwiPGYB7jsHNXaDJfUZ9-w.i1aJxWM7HoDFIirXbYZuKQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address deleted file mode 100644 index 8c3f521aad..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/faebec57446ff6086960a12ad95a6d31787df226.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNS42Njk1MzAxMDEgLTA0MDAgRURUIG09KzAuMDg1NzIwMjA3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYXVLSVRmbGZZdm9DV2tURyJ9.ndHS7gKlrYCO78G9ZIpB2jEqdv85zfQRZAJoYa-00hRZlHEr7dF7RQ.F--xWwL-JrjkxipY.jv_CYB2Jg_ppO9hr_j7oyTvyFy0gzvLiBDRDmRXajJJXvm-4Rx_oxx1hH-MQGF-mhmOhXn_UPB-NsVl3S6YrFq5C7BxHpa-H48yCQ8YHROrb44TmgWf26lDWRKyGdGH3AkKiUtxRGQ968vfNOLu5FCPcK1zysHDrmLMlN88c2NLNzE8F8F_iAWWwuE6Wt0vpm85qtsAyX73pTi-NracluzPMoy7XqAguUAHWI4tod2h56-H2Gls.Tt6Q5d4jNAXdwvXJk4QUTg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info deleted file mode 100644 index b79bac9b9d..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNC41NjMyODgyNCAtMDQwMCBFRFQgbT0rMC4wNTc2NDAwNjUiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJQTVRlSGNMT0M5MUc4Rk50In0.bjQxqrY4tQJ8T8qA1V73gNw0HPW8Ajz6myOrIlX8jj9PCsD9duBsbQ.UzOo26N2xEPLM2Jg.ijNc2B_c5igQwBxzMk9EnZDPVK36JrC0dZ1InnsBuY_ZXktKbNnW_x9Ipx5irWM9oCW_7f7lA4QblxzVtvxrFNVgNja1ub91SM0cwtNABlRx1cNoAq0qi-2PZQGBO0aPJZwrRCMfEVS9snkZu1JMG74ezMtA72O8okt3YJdQjNTcKJ7YsP69qAy9F0cchlHmB58U8bXDoaddPrx41NVCWdiDmPZh4A_8wn0YMpoXY7k7xdDTA9CQb5JWalg2YmGg9j1h_9-1nicyxsDAItG2OOE7JnfU7kbg3-y4usAvEVViqskBZcah_XPVOLiYl3GgpPzTmZr9AeMfyQIYUVPkoBHNr_LR6es.RVtp5fYSTZhXmxxzPWJDvw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info deleted file mode 100644 index 4812a16689..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyNS42NTU1MzUwNDUgLTA0MDAgRURUIG09KzAuMDcxNzI1MTc0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiQXc5WXZSdWNCT3cyOGdMWCJ9.u-If16vmX554Dc3b1vEQvxy074ipGJZClyrZmFzJ4pLJg3h002ahbA.GDOrfF1gPcL0DtKA.KEFCtKYMZSqJqHcu8-5QWz8XnFyYF1WN9p7H_lUDUJD-rcGU0WR4nZhAR02qGM9unBU1BF8PqHoVkK5oNO9nAqg7G8pb4pi83AurRtOtdeNJOQBmlZeI12rLJorvtG2RxjsDfObWHlh-LW5TzAZbRRFSO8s8XK6hfVrNybcCMiBa8NuPDynSvEoTir7IrjmP_gKrNdeE6LHYafiED8cUSME30LX33SrJ2Fof8NTUTWRKV1pZ0a-vcrUkL4k2M-u0dvYPxRSmlJn6PH3uR31ex1ncoSSGIA5Sb1YYBhna_wqlrMzM2EpSKQlrjAPNAWCMLu2MjdheeAOfgK15V9JSyXP-wS8AJvSJ.QtanYYmmeLuW0m1sD7Cjzw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info deleted file mode 100644 index 278a657d45..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0wOC0wNiAxMTo1NToyMy40ODU2NDEyODcgLTA0MDAgRURUIG09KzAuMDYzMzg2NzI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiSEZiLTRrV3ZYVUY0bFFPVyJ9.aX-P2pGvLb0TZEMmPiAjvInmOnTeZVq-JjJUyL4Ox0_XfC8v2ho0VQ.ViPeDShGm714yf0I.b1M2DratDW_NpHqK3Gy7mXiJvJlS5JcCC3jn9TpciR6U3RCE9XXLQJBnAj622ABdM4-NbjqTbjIxhSw1kIX77ADBjWg5G0GzZ5beXku0haIds9NgBwv49oWpLS7SQFtnGZkZtae3uCPj65lnRaY2rf10h9ia5qcakzisxRWjxXZdBKzDOJqIWX3M_7x8W2M4IrH8bDqvaKiEjaIcD1lg95F7NNfLUGJHDds-4Y4fvqVpIbapbSsMr54WOQQ_DLpMz9b7ztataH6EIYs35GtuKaBfDFp-Rk7s2QGELp3N31EGLLcz2119fR8tw0LLrjcUpouFGUlFTPl-g1J1LefXcRKTOGbnh3dAASUoUNWV3E-zccSR.jTU1Bg5x6yQP5_zT0N5QqA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json deleted file mode 100644 index 730bf0e099..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1lt47c46ydlmqs6tq5y4djkndx9u8mu3x6mqhje","pubkey":"cosmospub1addwnpepqtxdta64tecx2rdatsn0shkcascj4xu7a0x9p0cynqgn7jywrq905p244ce","mnemonic":"pact obvious seminar path address segment motion they artwork clever poet local normal dinosaur celery few hold hire charge chaos claw fine grow impact"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json deleted file mode 100644 index 004dbc5b34..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1gz8hnf4l6yz7k3mmyseus3v69t4f8s60hth0fx","pubkey":"cosmospub1addwnpepqw2jrvfe98frdgf9yfvvjk6px4mtat5km4r7l6j8vhd0hlmd2akv25zdlk5","mnemonic":"host federal present budget champion erode wreck category journey bamboo entry degree destroy rice warm champion acid ability ramp cactus food shaft mimic vague"} diff --git a/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json b/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json deleted file mode 100644 index 
0e3b8ebb2a..0000000000 --- a/ci/chains/gaia/v5.0.5/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1xs9mn3pfxgx4s2ncu0yejl0dh8lrquyzekqwz3","pubkey":"cosmospub1addwnpepq2mzpnuh8rd2v4ssfhaja4llsdv2g3d5vw2yplhkal7m3l8n0948ud4ufa6","mnemonic":"salad column stone sphere tuna teach trash amused cake menu pyramid half tackle scan initial entire resist become cost loan trade tonight manual release"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json b/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json deleted file mode 100644 index 2c14bbdc95..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "82c133df9d6d66a97fe64ae9", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. 
-# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). 
-rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml b/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml deleted file mode 100644 index eb6f4b2952..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. 
- -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator 
-priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. 
-# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json b/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json deleted file mode 100644 index e2bdd082e4..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-11-04T15:47:29.231543194Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz", - "validator_address": "cosmosvaloper1tgzhx34m3mamgzd5wjuwhhdptzu37crs7tds53", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "e644f6ada6dc23fa6927484cca6e00183dc3fe0b@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A50bVFQ0B4TqK6Jf9m0D9NhVEO0HqJFgoIhPpcY/5RAD" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "W0OZ0bf1Hs6gz8vCofrI267vAe8KT+AdaJpH8gTGx7lDK94R+3wwItI9HDak61KZSScQqln0gXjXIpXMu4BkfQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - 
"send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json b/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json deleted file mode 100644 index a5a6bb6187..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/gentx/gentx-e644f6ada6dc23fa6927484cca6e00183dc3fe0b.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz","validator_address":"cosmosvaloper1tgzhx34m3mamgzd5wjuwhhdptzu37crs7tds53","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"e644f6ada6dc23fa6927484cca6e00183dc3fe0b@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A50bVFQ0B4TqK6Jf9m0D9NhVEO0HqJFgoIhPpcY/5RAD"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["W0OZ0bf1Hs6gz8vCofrI267vAe8KT+AdaJpH8gTGx7lDK94R+3wwItI9HDak61KZSScQqln0gXjXIpXMu4BkfQ=="]} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json b/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json deleted file mode 100644 index 65bec3d2d6..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ 
-{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"PnKW6NeBJUMFjHt4zFCFcoODQ6igtYYqMjFp6MJNGKdb7ynzjZlXJoFDdL31vKFVjfqBLTT5J64JFslespTbkw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 8b5954526b..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "A129A9C5DCC0E76B98FEE6CC94007F5A9DD9FFCB", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "b0Upk7Jh+8qlBg9/nBABHW57gwE9jRwXcVK2bw9UcWQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "bllmLWHxWHFQecOBGM976AuAN9lD3RFHRTgrcY0au2JvRSmTsmH7yqUGD3+cEAEdbnuDAT2NHBdxUrZvD1RxZA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address deleted file mode 100644 index f82461c049..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/5a057346bb8efbb409b474b8ebdda158b91f6070.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMC4zMTcyMTgyNDggLTA0MDAgRURUIG09KzAuMDczMzM0NDQxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZkJwSWlqbzZ3UHRMUDRhYiJ9.Xd813npMEw-9lrIGXKwsFu0CejBjnuQlCk5L1_mKs2fCACcHeyeESg.eT2fnPglp8HNTBKd.DBxSUHsnMx08T2rBia1ps1adGRxtH44DscMgTgL-D-xu0cHLLxkziH4U6pK1nD9TTA5p7OnFIv7-QnWpTvVhfbmPlmpmDqM-UkAPhMFIm7mtkBxUq1jf5qqRDHyT-5ikSEn0tIceZHC0cXg0C_son_QpW-LSCh-YUoNKpTBZCTX6u0jrZnQISzU4ztgYKpX5uSCIJLZel-3zqSw50p8HWneex9H5bQBbGItSBxEo6bARvGNCDD3N3rLq.0UVZuIkn97EnR1AL_Yrb7Q \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address deleted file mode 100644 index 
4e62c6a822..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/64fab9a69b7a15ee095fb3da8055a6445188607e.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMi40ODAzMTc0NjggLTA0MDAgRURUIG09KzAuMDY0ODk5ODA1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiaDhIT3drRGw0UnplckxvOCJ9.vTyBa21aNqfLtvvdm0QftogEJQQp2FerqFR9tflolVBcE5-O59FuAQ.lJ6VZ4mXuG5M5K9P.ff22Ek0KiqEvRn1QY1oCKYd2qFPDgnJ3gccM1UWmoh6fIUx8Pbsktrnu45HtwHpnfEn8Gxp24snmRv0ad3Qq_h_j5lSS98mnoss7d1CLhtF7P7DBYwLNMDSNCbMjgD0-Bx0ZhdgTGoBW9_t23aEQH2yBVt6pfkse2Kd9hKvy31-MovUt-WyQ3DzxjyFwH3utdazTnIwyCmCH5fW95VraXeX2-ynI47pYQoZ3vA0StfYawE59CI4.RkuJB1gfEF2wWRwnk-1oKA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address deleted file mode 100644 index af2a4f4748..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/c99c7ba6e4891e5981366306f27b036bfb0d9b1b.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMS40MDI1NjU1MjcgLTA0MDAgRURUIG09KzAuMDcwNTE1NTI0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNjNjVEtlNF9QZi1WNW9CdiJ9.ZnIuGZ7P72EPIxV-hnmbpKNJxNkM23GUizPhBP520TSAQvGY_XsBsw.e4wvOVOnnoNdApXM.Joe1jjxS6ej_zICXtu6WpfcsZdXRqLoXdqK6_dPQslinSSfKKdekMb62zYCNOxeyjXb9RSu3EbYRGq3TAG8OWZaqpbsYuenjAvrlIRYqGY1jxwIFQwK6rTgGrZwzb_KxdB9mI99kGBwLP-T8VsvI-bWtBBri5sIIb-UEf_F4NPfI9vs4Bkco-PV520xPUN8T1e6oycJFYQYy2H99vcFddQ8ENrF2T8xr0v0HLeMd9fbnSQ.NSXG5ji268uoENc0zVHUmA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info deleted file mode 100644 index ab21145159..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMS4zOTY1MzkzOTggLTA0MDAgRURUIG09KzAuMDY0NDg5MzkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZ0t6ZjgtZHNDVktwRHo2ZCJ9.pVu0Mtx4jNLLIDS3pgoN02A3GI1NOyof03z2kQBthJkkLIFn8aoKYg.YV7kj3HXaHRBuaS_.o2ODJ4i0huip-aamiyMiDmuZc9ZOc65R79fKmB46efEl8jcjog7Zql5z0WoKtFh9qfkHAUYnMM1dVZ3O6d_gSZU-r-nEoq4WqZep__zLGJ7ETX-WPz11y-oi4ZeOpXeEh7gCDFb9kZhIzIDvoBv-qYVfOFC33OoN6Bz2OGtC4aIjVPf5vwcTwaRhZMaM_atH5smS18LODhppcSmJAjV9xjsca3KGQtAAYLYuVT3PG36W7p4JAww2m3AeeqHP0tdQQwa6sg8PLOQg9Zxuw9H614ZEgGX7-Ao9iVUkqP8BCOCoS9RnffAkfP0WCR7KNoA5IxJAdk8kKBWJfdsKb7xdyO5RNSIzbuk.xkWnt_f8cV9kh7it2xXK7A \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info deleted file mode 100644 index e30f9f7b76..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMi40NzQzNDY3MTcgLTA0MDAgRURUIG09KzAuMDU4OTI5MDM3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVmNSTG9SMmhQWFhtM01oNiJ9.qcJz1u3RDq-IFS9eLisFpeU3mKBn4TuQNzr7WcMurqjW5EmNmUEtKA.7RRHagT_GFcnaFxJ.pkLQs5JLngcUu9FIaM6dpx3TIWmw8hMLi9DVq9DjVPdwO-_79CBYAUf5-p62iw7czNl5f5l0YwtE-_d2pwcsS3sl0F6Ez2RsxVCvZrC755XCuFktkWWL-5uXbUpFNyvZs-6ATOxhr0aoAkF5ZGkUUFweTMZrglLGh5HlKymVrSx9MXSqVrZOB3CFhZVxX-EobcxKm4x9Jph7LpDj1JODpDcOawB6zZwMF0bqYPywTfKnrZlB7xlmhcwv_43vh8Yh_EhXeCj4ps4i7PqMiBLg2Wn9Mi8SYL5P-C_J2gi_PvsPU5zKkVDVuqEGrlMCTWEz4s0tAeEwoJFk77MpPK323jXv4U87o4pc.W5JsaLin4N_eJ9rFryQJEQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info deleted file mode 100644 index 5aac0f6eba..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0NzozMC4zMTA4MDQ2NDcgLTA0MDAgRURUIG09KzAuMDY2OTIwODYwIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiejF4dkoycjE5SjNMXzhCUiJ9.n8QDC3Qcju_iKHP-15BXBPKcAedMbGd6L8MmglKV1gkneTZoFWAIvw.a2J96EcNQ6dLaguQ.2QRIomjZxj5ROCWXfe7wgxnWl0_MncasnkmsbP5yH0m-lmjd_sRFIQ53MpIsFqAHQj1xFBmJRP2GqXQZkQt-QeIrXyzylzmDK9FtlKL-gvCxuoOKync4Il62SmnClMkFGPObebRKIAluQoZ8DzVYWSklWyP51eIBewz2FyONvQkOS5ZTUYcTzqhnK_ZwQM1t_uzBQ9TvP1sUvABRsAwvfWZke8iP4I-uBHlMXO-34bCQCmYRRo9TF49ti0T3tbAt7rDjmhv9N0BrGv4Ir_o81wo-4Zd0xgdtZlu_Nt8_99qnF9hQkAvkUD6u6NyVOGC3Gpppjl6D3KkWxarGkbJFiFVubMLPmkO_3fll7kOsEEFMjcKR.2TUybiWXhefRJJF6cPTGHw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json deleted file mode 100644 index 20fa1e4754..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1vnatnf5m0g27uz2lk0dgq4dxg3gcscr7uajmv7","pubkey":"cosmospub1addwnpepqg2wvn5a6kn7yjqqxvchav8g9yz46xywfvn248qvmj56jpxypjvakjqsc3m","mnemonic":"naive early elbow chat model athlete lottery unfold comfort scare portion army era patch yard penalty two time student jazz middle endless execute relax"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json deleted file mode 100644 index d99cc314dc..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1exw8hfhy3y09nqfkvvr0y7crd0asmxcmauvuvd","pubkey":"cosmospub1addwnpepq28jx6t5xgfl3pkvgydru2h68pwuqhunkm70hfttqp2esswctdzv6echc75","mnemonic":"lock border during undo menu crouch ticket absurd slight remove sock more nominee ketchup night parrot firm future essence need devote client sugar stadium"} diff --git a/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json b/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json deleted file mode 100644 index 
1c00c62d5f..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1tgzhx34m3mamgzd5wjuwhhdptzu37crsmle9cz","pubkey":"cosmospub1addwnpepqww3k4z5xsrcf63t5f0lvmgr7nv92y8dq75fzc9q3p86t33lu5gqx5vfz4y","mnemonic":"trigger liar blast diesel fall mention quantum vast walnut mail rally there village crop timber assault bachelor scene taste hover top caught concert rather"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml deleted file mode 100644 index 54de73e392..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/app.toml +++ /dev/null @@ -1,152 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. 
-# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. 
-enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. 
-address = "0.0.0.0:9090" - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml b/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml deleted file mode 100644 index bd592119ba..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. 
-# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json b/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json deleted file mode 100644 index d1177e4ac8..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/genesis.json +++ /dev/null @@ -1,342 +0,0 @@ -{ - "genesis_time": "2021-11-04T15:47:42.444060916Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": 
"0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv", - "validator_address": "cosmosvaloper1xaz9g9p9a6lcqznyakd3nc5qu85cz6zup6q38l", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "b2617c1100350fcb281cc6749f1f79956222be5a@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AgfKYVHZJ3hgLBEDTuPJ6yf8akWkCVlsLunRmZ9PVh4w" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "1KY7yzImO543wGvV8kIlxDgUxm6Tcc3ajtLLYV89X/tTX5A12F6IdBBibcU52joSM9eAFg2pUDhWXlFRybqaZA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0" - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], 
- "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - 
"send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json b/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json deleted file mode 100644 index f9b223e349..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/gentx/gentx-b2617c1100350fcb281cc6749f1f79956222be5a.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv","validator_address":"cosmosvaloper1xaz9g9p9a6lcqznyakd3nc5qu85cz6zup6q38l","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"b2617c1100350fcb281cc6749f1f79956222be5a@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AgfKYVHZJ3hgLBEDTuPJ6yf8akWkCVlsLunRmZ9PVh4w"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["1KY7yzImO543wGvV8kIlxDgUxm6Tcc3ajtLLYV89X/tTX5A12F6IdBBibcU52joSM9eAFg2pUDhWXlFRybqaZA=="]} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json b/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json deleted file mode 100644 index 1e4f16abfb..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ 
-{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"iEFaFp4baiAkuUQGKLM6cDiEaJ1wNdWKcZmK/vXqmmcDMwlXEQKRIRv/2RDKG75dRAm9vL8oMa448/NUUpJDCA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json deleted file mode 100644 index eec913ba7f..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "AEC2C6BCE9572CB47610ADE77F3C075DC6B3D717", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "Q3lqhqi32SHvUpEqoVMHiRGXbThJ7Sbz98IYFIM+guo=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "1+GCG7iNUe8caJWcaPhMHxUIcUZPdUo9MIN7UGSv8PZDeWqGqLfZIe9SkSqhUweJEZdtOEntJvP3whgUgz6C6g==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address deleted file mode 100644 index 518db809e2..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/3744541425eebf800a64ed9b19e280e1e981685c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0My41Mjc4NTQxMTUgLTA0MDAgRURUIG09KzAuMDc0MTQyODI1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT0tGblVsckZOQnBwY1hiOCJ9.v4uS3F3sRZaXHOjSqg7ikPWiPaRcVwlfjvNUFnDhN7ud7ZNn9h29TQ.y3FESL0YTY7Qb198.UBdUf8mh2WtewKbMiDZ1fcw-TvuoMMMkKDyAjhHJRMa0nemizWCyqx_5gdzKkmD00IgZwDKfrT4BCu-axk-Ou6_O__XVerJGVQrgg28gi6dkWWy9Uju3b8KpB-oucibpwffc6JpDMD6KuqhawtzZEHq43yZob-IBIOPJba3MdWaCGgEdf12sHX_ckghjy-ZYExg92M1WID8JwbEfjBp_14FmJcvRICEsfkacs1ceZK0C0Gt63swZ59ue.6saz2fRViVfN_eCAzpS2Iw \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address deleted file mode 100644 index 
30eb62ba8c..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/9680756d779d88bdb47a8f0b5522b135812c3a10.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NS42ODk2NDExNiAtMDQwMCBFRFQgbT0rMC4wNjIzMjY4NzEiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJnWVV5YlV3b2lLb3lEaGlxIn0.QCgBZNULcfesHuKvVDbDJQDK_LmptOir7iRwtsFlhqe9jvxv2FPNAg.ZuySvpSIgoKLCoRs.cugISI2Y5eEjl2T93YxTy5zZQrK5O5c0FKR-lvlRfiEMd8cDqei4pykU8RiHYKU4H-CPsuLLhRX3S7iooasDoCLdOo9a9MKo861yBx9vayMc0sz-zZN8OCoIyDQ9pa5iHNuzL4kSJOJGUrlQMk3bKNcjALx_-W9qBYQlQ6DN8wnowPL0NqblfdE92aD6cq8DOA1gT8lltMs7Q7psLPKKt18OCyhYi8KSEjQu3HALbPPOiRJHcTE.YSN_T3AQZwJx7k5MMxqhzg \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address deleted file mode 100644 index 8c2f468fc3..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/c109ea463f3fe3dfdb6d98037a0c19e2b6d44ca0.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NC42MTM2NzQ5MiAtMDQwMCBFRFQgbT0rMC4wNjg3NjczMTYiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJFdFBGZURzSUgzWjlBeE50In0.KwEj3f-Mmxb0t-i5GlKga5zM7jka30UbzYW-Sngyz7jecvjdflkEsw.mTTn8vgmlEJTa9OZ.e3yxoN3gqXU_eUXoeYICL-QcEevYvmp44oOBDTovuQK2jNVJPwLmS0Bp6Pzqe-WHiumBB2OSrA51kHPvj6hWIHRCmO7VQicODLMLSi1o4n3p3jR3pQJepz0JEoc8nMRr-QknJALFNoL1JXR6hSGum2QXFl4u9aBXOwkPStUCJG85UZ36SnXKCRsXvDkC7Z5DLntNxp_WGq2mfh20wJRYcqoPN9o0nVr2YF1JShqIiKfe5g.5e1lpxCz9Fi3pks7vWVFoQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info deleted file mode 100644 index 7b7f6c3d9c..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NC42MDYxNDA1NTggLTA0MDAgRURUIG09KzAuMDYxMjMyOTQzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoidTJpNEQ3RmFqZHlXRXFDeCJ9.ylqk8B5MaEwCYmLIukes3XkXxGKkaB6kFMVYNcV0_-5z29vt9WUQ0Q.bvkej9imldLoFWcm.jRKIDMaq2T5grW8s7TtiK8_hMM538Ivi7soGIyDxU3PomUqLlL3ul2L2mgwmcmICm210AToxc3Fx4KIQbf6bB-hesPcfXNQgFsJuTpXAtZ8DrZg22xs_8ZzgllN14LWx6wAGmWsinFUMS_RLslD3CJFddzO39j2UWFJIBKLo8zkfnvIDWbe2Vda0RVrUPiOMbgzqaJM73DDgXxpir-aER2UtSaeBHFbi_DxxNyLrVeS90Lu8A5LfY3WWc681Fv_Cvs8XTIm0VI9dVsq-qFejZNy9dN--jz-x9SJcEMwb_QQ32El7sB1Yo6VfBVWCRvWmGQjDqV68Wg7UNM0biKc4CHzOac2OIHM.Vr8XbTJkkAOAW4cFuZ28OA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info deleted file mode 100644 index 45b722ac06..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0NS42ODM2MzI1OTUgLTA0MDAgRURUIG09KzAuMDU2MzE4MzI3IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiYXgwVTVtZ29sRmxUMEZFNCJ9.yaKh9b08Pi26qhmKuwG2GgMgmDWY_KV6zb1yJ_I8-JZHlg61QN146Q.AFqz4pfoU65bFkza.vHSXUU9ElCcFpHuyOr_lf3FuSfobS6Nn98YXNjOcsj5tcw08zFb3jTeMd90U4b2yVl-g_jUzBwz6tVTsMvKXjEyyQP4zbLK9RiYAvo2d4Qfz6Cqof7lpFE1nrs8hzER7rTkyEoZ37Bfl6D-cHWLZip4Wlv8v1GEGrbQnvRRpUNhBWRNHoR_ZvD47C_KRZH2DdT-ieLJRMB1pIVWMjgATQdhfimBwlfBNFph54ajPnc33nEx5YVwgUXqmZBeJYoiNUO5MmwR1EYHyLtKQUFzvkvj_nzkANbog2xJLYcLuUaw2JLc0faPp5gsUZnUer5ms18FoFKsd80eRaZ4nCXWfvERDRl_812qj.IIfR3WCV_7WYXUbytSzgvA \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info deleted file mode 100644 index 95b59b0c61..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0wNCAxMTo0Nzo0My41MTkwMDQyNzIgLTA0MDAgRURUIG09KzAuMDY1MjkyOTkxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiVUFZVzROV0NGU0NIMG5fUCJ9.jgdjIazR7B2dqQ-d2G_C8ViJN6jcaVzwSHZJyD4huBtEa7LkpzCxxQ.pgLPbRYyu2l6RUKY.FXQaljl1rrsxyMqjGKyJzN2GQQdrB34LYqQWSSYSnc5TfevEYyc7QwFbgeu-DIAqo_xxmmBTLGftY1X_R5WCpmXqyhOkxVKpPSsrHsSwBdNalxa9AHyQB27RbMOe-gqte3MyUd0XXaTl365nLA7vCPiJnGsNUDjOLbu-lEJWm3bN36daH43Ifb1BACYH7TndPU3c8hOOmyxHVlvYHyZutVfqCh9kr80GIXVTYwWTV5LMcKGtgfc-VmljYGAaI7IqdeiMlsZcr_l8TSeHZHNFGxZXc8jBHezMxr8qnD_p2UKnYylIPTfu4U_Ea0yeWHuIQBCyWTXyf02Bf88foTXGJNiScIWJxLTLdbVtem59pT6OCA5x.IErhLYYhkKMX_UlVlDtXxQ \ No newline at end of file diff --git a/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json deleted file mode 100644 index 48cde56108..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1j6q82mthnkytmdr63u942g43xkqjcwssw6e2x9","pubkey":"cosmospub1addwnpepqdjxwx0t2kj2qyw4q6gj8mr40y6mtljdsa7u6hhhteaw32vshnucj59wprp","mnemonic":"nurse ordinary pulp find square senior club great balcony monster faint glass arrest curve message orange fan syrup pepper smoke medal tent sheriff grocery"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json deleted file mode 100644 index 4b94f9d064..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1cyy7533l8l3alkmdnqph5rqeu2mdgn9q85ulcx","pubkey":"cosmospub1addwnpepqd8rasvzjylp56ezk3ydvrte0k8s9gw3a3au00rdqapu80tqqk66q4sae27","mnemonic":"toddler strategy wine bridge short position animal brown cream slogan merry beach dust exhibit stem wire once offer name van pilot code course observe"} diff --git a/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json b/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json deleted file mode 100644 index 
eeb2b00014..0000000000 --- a/ci/chains/gaia/v5.0.8/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1xaz9g9p9a6lcqznyakd3nc5qu85cz6zuyw5ytv","pubkey":"cosmospub1addwnpepqgru5c23mynhscpvzyp5ac7favnlc6j95sy4jmpwa8gen8602c0rqemmd7c","mnemonic":"prepare detect federal maple ability require blood slam hazard universe soon bubble simple canal rapid style proud thing horn warfare galaxy wood exhaust advance"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json b/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json deleted file mode 100644 index 388c554d6a..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "abcfca3214aae21d8fb33c29", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml deleted file mode 100644 index 1fc0b791f4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/app.toml +++ /dev/null @@ -1,192 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. 
-# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). 
-rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = true - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml b/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml deleted file mode 100644 index eb6f4b2952..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. 
-# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json b/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json deleted file mode 100644 index caa8b3f630..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/genesis.json +++ /dev/null @@ -1,356 +0,0 @@ -{ - "genesis_time": "2021-11-24T19:33:05.288086182Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - 
"security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu", - "validator_address": "cosmosvaloper1da0qjh7qptrscq59uaqjwtte0ru7mnfp08ed20", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "f8825df2c406207c64cbc02b1f543a53a46094e3@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "fo1Cp9mv2cOpF+bpO0z+emIzeALBVhyyslZ29mvP2QJU7kCbq1KPeQ6xrQMJWNo5IiEvs0JBdhRHEC08Fbbp6Q==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": 
"0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": 
"0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json b/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json deleted file mode 100644 index 685abc3aef..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/gentx/gentx-f8825df2c406207c64cbc02b1f543a53a46094e3.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu","validator_address":"cosmosvaloper1da0qjh7qptrscq59uaqjwtte0ru7mnfp08ed20","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"f8825df2c406207c64cbc02b1f543a53a46094e3@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["fo1Cp9mv2cOpF+bpO0z+emIzeALBVhyyslZ29mvP2QJU7kCbq1KPeQ6xrQMJWNo5IiEvs0JBdhRHEC08Fbbp6Q=="]} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json 
b/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json deleted file mode 100644 index c5ecc004ac..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"ZMa5+ovMRjEApthUCunbPP4UIS4BkLwrN7ZUYqNsI6mXanUuQ3pszUuvB1YT4uXgQSNRUVVZ+0yITLieUxXSDA=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 02126f7d9c..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "858D7DD17C7EF440D4AE4288C5BAA80EAA51D7FD", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "NYFNoMsPbFYg4H/WodS6spPKJuYsFWOz/uEQMxiA7Mk=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "7pxUnNSpT3r1VnjhjgbMBIekqm3HlhqWzj72wp4/VZI1gU2gyw9sViDgf9ah1Lqyk8om5iwVY7P+4RAzGIDsyQ==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address deleted file mode 100644 index ab43a81270..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/1b49482294206b404476f840eb8468a09bf47962.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowOC41NTA0NTQxMzMgLTA1MDAgRVNUIG09KzAuMDcxMDg5NDgzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiUE1YZkJzdHdtTEZISDFTUCJ9.e2R6TeUCkYqRtQcpeOIZ9MH7LwWmKYsF-9azSTSBBGSkPjEmY49gTA.dzTMUVpONW5lmbuZ.2KL3qYiMBjqzewecmeI8ooDqhYw8nTb2MhBL9CZ3Blm-1PrQR19YQvcrYEzIhydKUNS-tm_vDDxQ7b-FtEWwGuZak7ErpvrKhsZFQiLEyHg0F4UEvWp5W4fY6hvIso8cf5HlIAHvSnffej1WfngiTSV23u2KbgvoiDU7Ctf5uYpBjaruHRbzk738ruKSTRyWPrag5L3IU-5uRNfhxXH-pGa7ftZcmPwMhTxCGojghgSmg9ZkGgQ.2UgcRahsvtKKqlHlNtnZsg \ No newline at end of file diff --git 
a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address deleted file mode 100644 index cbd24616ab..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/6f5e095fc00ac70c0285e741272d7978f9edcd21.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNi4zNjMwOTU1NTkgLTA1MDAgRVNUIG09KzAuMDY1Nzg0NzI2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoicVFEZXh1UFllT3l2RG95UCJ9.80eJIBA0YgpL8j7mOFmajvU3VwJyv8xSVdhJOIn3KEwOwWIPAdvuZA.JOxBKB717wxiUuEN.VLZqTx5iM7dUvFQ75UbVhz4a-Wz1cgaRmf_-3cUD32-6c84jk8FyDuFlc1Tb1umMf8H1vwIE9higSQFMtZeuzCxIG08a9zTfXPyqWHfs5aodiDi9tDhMH1t2FQz1uHlf0UaAz1_tLyFeY0LuK_pXk_-err9r2ACgjDKnMUI_y5bmfxS1EGOLvylcKea2s1kROzYz_h2avUodhHg3Nx-qNtwlTxr6Hv74E21ZJPEtLe5PAb8Rm2LGrPpO.s7eBd83I848F6Vj-WcS3fQ \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address deleted file mode 100644 index f9bb589ad5..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/9bd0eccd58edadaadadea911cbba3de68785fc86.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNy40NjE0MjQ3NyAtMDUwMCBFU1QgbT0rMC4wODQ4MTQ4MzMiLCJlbmMiOiJBMjU2R0NNIiwicDJjIjo4MTkyLCJwMnMiOiJ6aFh6S0haLVVQOWhnRVFnIn0.xXrGsXcX7D1hn6m_nAwcGfAkdcgjbmLz_UEmlE0W45mFUAyljxjRkQ.KMAJFWKlCnYw4mvX.DyhKx-xyqWKJrUS42VAtNkSWKVqkogh2q-joJOFPJcF6f1IcN73JqZxdYmvsai3dRGs5xiTQsjsiM6tjvDKSFEaT3vYqpPwxTOJu2pvo_vWbNlNzauQyWgNiSiAPvtEWZI6PCTdk7FuyPBfm3dE4SkWuQBUuYZNoUnkkEXWDmqD3lKZ2ZIYcc0hqA4FQVP1A7RL4J16kn2BeXOEBPsUH8JZbplUDXA4UAnCFAtF_JdCgug.6K4vdxOdRJ9rbZa7cKxJwg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info deleted file mode 
100644 index 9fb08e779e..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNy40NDc4MTY1MDUgLTA1MDAgRVNUIG09KzAuMDcxMjA2NTUyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoibl94Mm1fOWVjbzhpeUxvQiJ9.ikzAEDoLYycFQVexbskD1kRzstVFXpR-YNz_kRrLMSUNSgkhhZGZVQ.7zH7erlv7nJbS5gX.xAIev6M3iU17fXOFC4SU8Al5JJp8_M7oaambw5ZxgmFDyajwffxsuXYIW7Y9MLGJNJ8yVHic-iN9HXdbxNIBfMsP138D6VXpksxAlbEWoVRJihwvqF_iqEHcuEyD212WEwIPTY6hH822xZV4l7yQlEHoGlQtUnMuWQz1IzvcKFYESUUavfiiSfP0CFbY5MAd5JremAtH-vN3xJ9pqcL0hOl9hmeKmc5pxZtnrbiTyVYiDJI8zPYeWFKTT_AvKoT9aoa1Ouo4qZ2whbUYufG9dC_DDhCcUATJocFBqNegahBOgegaJxDVzbv2PLqgWTrwpaMi0XiKMo0xt4HYt0LVNtbrpgJjHD4.PK-PK_u4eJmtbGd4y4G5Yg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info deleted file mode 100644 index 5f864632e6..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowOC41NDEyNTcxMTQgLTA1MDAgRVNUIG09KzAuMDYxODkyNDY0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiMXU3TFdzdjBZSHdwbkNqciJ9.fMo5HmXetkpNLdBkXlnIZk4ApD-nFYMME6dmdFarg7zhFL7QCFN7KQ._8e7Jva2GXFtzSt8.I2ta6Y6HKwoHkTx8-rPjLcajthmS90-iVMA_0FIup9lNd5LBLQpNAHY2hDbhCUbr3rNBPhTDVz6fD_S3ImfGjg4rQePDvOGgjntteAJ_e4-h1w0PCliyLlUM24Bw83aJmT1tubJ_bLe1Jkowg2FFv7b4QIY4sEslkoM0mMFmjgghZgVYzK_7gHY-KfXzXhEt-7lNypUbm7Te8ZQoZfzqcjwiJuwHKmXA2F5aWlNZK2ry6ivWWgRVUzwS0VWBRaDlx60R4PIrR0kqF_H8H4SDa_yIJsFyoLLliMgUz4N8COs6dHc3rf-jfopVtFriVDF7qRuKvfh4cL2FPiD6A1_kGBG4QxTWytCx.uiXHyY__OXUydn-keVhDyw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info deleted file mode 100644 index cd874c66c9..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzowNi4zNTYxNDUzNjYgLTA1MDAgRVNUIG09KzAuMDU4ODM0NTI0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT1d6Q3h2a2lleHNUUXhXWiJ9.sMP2Opeadzy1NIqSFinrOosc9ueNGl6eoXzSlj1PuyjzKVDNPJBG9w.ymFHLPlxDdlwKchG.v7aqEHKE7zGY0DHDrV80QujgwP0dCVJG8PRsZv30pE_49NxiFjMJF_leVa52oiUN_8HCJsitXTIcs8Kku8eqL1FYPrFG-_pNZduDHM8w3dU40rYRhpEqo3NApemWXYeX5RQdnuXTHqriwIxOAsTbpoHxls7kMGH3AyDxz1yzfeATF6C0wKIUj1sssYg1dNX6PK2Vo2yyVWdmNFFX5UdUZZhSHmHN0t97k-GiYOYSfu92NLy4Nv1QQDchYuTGKdLdxMV_NApAlYUFFijFZthtEdABNzUqiSi296uCj7pw1uR8K9X-bzITkWb3_Yh-ELHbKbLAdF5FBST9Sw76jiqSGAQoZsunDNcnsJSgsAMcnGRtzBa8.FBJD9ZKZOHhaaeO9_y8byw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json b/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json deleted file mode 100644 index 6a2856a3d4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1rdy5sg55yp45q3rklpqwhprg5zdlg7tzuxdlhj","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A6qQIsP43N+JyGeXtjYV10mUC3gfk1QcxEredSeNlezv\"}","mnemonic":"response group easily vicious master immense circle flock bridge chest cannon fog acoustic million plastic cinnamon rotate butter minute fatigue seed stick cousin mule"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json b/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json deleted file mode 100644 index 05c5482efa..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1n0gwen2cakk64kk74yguhw3au6rctlyxf5rvmj","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"ArOL1s7aGOyN+cvnOzf/b+F0aSX0gcNOmo/KKTxFqZ/a\"}","mnemonic":"mass symbol wrap wear typical romance machine cart club famous celery impose fancy chief emotion excess figure wet insane muffin tone awful coconut romance"} diff --git a/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json 
b/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json deleted file mode 100644 index 558019b078..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1da0qjh7qptrscq59uaqjwtte0ru7mnfp2ndcxu","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AoMRzDhz5CdelMYvXq/YUvy/pJTub9JJ8SV8yqTlyOmc\"}","mnemonic":"dizzy lobster gaze luggage strategy drama negative bright trumpet favorite upset chief month popular hungry potato mad nephew peanut ship city purchase method metal"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml deleted file mode 100644 index 1fc0b791f4..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/app.toml +++ /dev/null @@ -1,192 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. 
-pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. 
-service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = true - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 0 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 2 diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml b/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml deleted file mode 100644 index bd592119ba..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/config.toml +++ /dev/null @@ -1,401 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. 
-# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. 
ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. 
-handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). -# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. 
-enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). -chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. 
not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. -double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. 
-indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json b/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json deleted file mode 100644 index 2f2a75c350..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/genesis.json +++ /dev/null @@ -1,356 +0,0 @@ -{ - "genesis_time": "2021-11-24T19:33:18.421970195Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - 
"@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - 
"security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem", - "validator_address": "cosmosvaloper1zx9v7cdvltsgyzymc6wwd070nzje4duvnk0u4g", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "bb5010f06487a9d315952e3e57c6dadac4ac425b@192.168.50.214:26656", - "timeout_height": "0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "xpbYGwOWxrHHn45537a7C3wJJPs/I5Ccx3uq9ZzoEHMyOssoynjbqPsNVNj9ymcLqy4OYVMojbfqmSbMJuVe/Q==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": 
"0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": 
"0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - "upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json b/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json deleted file mode 100644 index 74f5f94556..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/gentx/gentx-bb5010f06487a9d315952e3e57c6dadac4ac425b.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem","validator_address":"cosmosvaloper1zx9v7cdvltsgyzymc6wwd070nzje4duvnk0u4g","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"bb5010f06487a9d315952e3e57c6dadac4ac425b@192.168.50.214:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["xpbYGwOWxrHHn45537a7C3wJJPs/I5Ccx3uq9ZzoEHMyOssoynjbqPsNVNj9ymcLqy4OYVMojbfqmSbMJuVe/Q=="]} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json 
b/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json deleted file mode 100644 index 4649f65656..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"aqiLE2A0F5uKvpi0NoZTd+SN0hU1sJdb/ZecNbkmv9q6yo1iuGVhveMMsAnzrPmox4lyLlgINPny0nOhizn2Zw=="}} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 807e3970c2..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "8B39C329C83C7DE2A2F345C384B36F05A84F2AC0", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "3v9tEIBIsTBYs45lfoaBa1DtropTwNPjxWRKNyh1unI=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "GBxwdlqDA1bc4mgTEtS/+NMxRXOucVfeOlFtaheA6YXe/20QgEixMFizjmV+hoFrUO2uilPA0+PFZEo3KHW6cg==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address deleted file mode 100644 index 7230415964..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/118acf61acfae082089bc69ce6bfcf98a59ab78c.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoxOS41MDEyNDE5ODMgLTA1MDAgRVNUIG09KzAuMDY5NzExMzQzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiU25yZDdpRkliTkpYZ1c5RyJ9.lZtxW-Goo8loYIEykGq-NVYIohLZxK2FeO3jlQnon1I3HVi_3UCTGw.JPk6bE9slrEzRd1Y.DbE0LNFkwPCjhaIpHiVYywZRMkR5cs1Q1ma9L_bc1dqflzYG2ZtdkPpgE02e92zdkjRCaOz-U2l4k4AH95Li6rzP7p61XiR-RFo7qt-YIaOZvRoAyNfoCdZyO26JrzAX2b1hcP3re_WYxPz3k8hArJE_hNOdR6hxC2EopEC5CbCfXqilaPkEgLePCJKrqbMVW3vzcAVO1fjbn8pxVxvld3H26BB31ZVuDHR_e6upKg7XDyFq6F9cpbWG.I9JzJS5586b9eQyGVEmHbg \ No newline at end of file diff --git 
a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address deleted file mode 100644 index 74a2a9c79a..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/25725cc6039f018e7fc78971b5af151cb0b52338.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMC41Nzc2NjE0IC0wNTAwIEVTVCBtPSswLjA2MjE1NTkzMCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IjdVMmFaa01mZmlDazJWVlUifQ._Eam3zPOJ_9z31QMf2vGarrSCy-8OO6R8V3oiFPoPLJWs9kDLZs4KQ.G1a_5-gTtaFLTzNe.8e8vQWKINIYpXRg2sEW2QNrh2AhiyM39U5lqDQoLmeaozfle33Yyl2b-0eRYvOxUL9KoeUpLGtOa6ZoWAjla94TdUfWbUzHegNkP8-ibRD-u1LoHizRzIBHsTi5QoVdQGVJqxW22vb23t3OVjH2VVrzsQDrwGliFMR-nQT3GccTO6LowDGa7atVjJnc0qyEsrZ9NqvBJRM0A1N3ns0_MTCVIFlRCQr2mQ8j4NZmr8e-4Ww.wsvyHcDA3JyxpHTO9El4kA \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address deleted file mode 100644 index 47b98bfec9..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/897e3a30d6196bf49e931d458ab59b95140dee6f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMS42NjQ0NTUxOTMgLTA1MDAgRVNUIG09KzAuMDczNjg5OTMxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM09NSHpGS2ZzcnRZcUpiaCJ9.JKnoJPHUxymhduvxIJ9Y3uIpjlDXAhdzM0s6h2V2YwkL1PKPr0MJWA.MVCYrgojv-yJgZe8.r_z_gW4tANrbjpRj4k37TOxYrm3mHs7aJAwHtW6iVgOHhLljrcVJP0JdoqzvRYZpDxpayIovEFlEXpnopVjgZV0XpqJZoLmjFfaP2yOqbsrSOk7j86CEZPoGxLZqThL7IUQ1Fji3jYoplTSoOfaYQS40fBQgoLBgv4ZD_Vh2FEa45fP6-PEzHWo0-_kMyWxWXMM6KbzGDaN02aU8AAThF98iUo1GHHGCzjuRGW1OyvuHBR198nk.x1JgVMjEXbH5qEZf1-tNfg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info deleted file mode 100644 
index e53db9c699..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMC41NzIwMjI0MzggLTA1MDAgRVNUIG09KzAuMDU2NTE2OTY2IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiNmhYeVNDaXNFNDRmYU50NSJ9.yXnz86KYtSpKGF9_xqy80tu_X3j9azA50Gy0IlYUkDFJzhOBt8J85w.hWiGnawDFEJ21SRW.Pmkw-7YANEweAfCoO4cwwiiuaRvUcRReXkk3MYqoSctKKJc5M1STXIYhxKq8R1nxhMeX6bzdRpJwmVcWU6wEUPBgNnJENykxDwxpxisMsZn1orL3ySc1r60Yh7-AgriYoLXfdyBcHM3xi0wglQV9cRqxNhp6cB6qM0YP3WIfuW48yh5k-1Ke3SDrEOLHbAHMLR_KjCnI7NuyPFvrL-FbcyNsYNkI3NIJ8uDyMtaVxNwB-8C2WuIBmPae47iqAG4YXhAxVG7XXYmYKmQH08EvGsHiXB4IL6Omr2VHYKvSZIVIvzWX9W1UwlYpCIu_hf1yPEbBE0287_3Ymd06J2ZUB5R9Gz7wP8w.6dnU167tXcmROvvJnZWNxw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info deleted file mode 100644 index 097d150a78..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoyMS42NTM4MzY1MDkgLTA1MDAgRVNUIG09KzAuMDYzMDcxMjMyIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiUHJqSDV5OFFueHQwRDRpaSJ9.jcO3HbJ7y45QeK4grQ5oET_W8LMAwcCAhjuji_MH4b7a8gwVGhNdiQ.AncSbV6zoozvh9B2._VmB-1kpDhTFQkcj3p4vYjO4r8PP_hUBAiaTKA9MMWDhc6wJgUbZe4ETgAic8KpVsD6MRIEwtaUPZQ5A6OJILNsojKpEOw1bZlWGVCugvOvX_yn6bGwjDWd3hQ6m1NB7Pq9AqL7LMQ4Vq5PhoHyKqprJVeIVnq8emTMQsnM3jF5xJEnVNRixAhaA7UJQkXZrKyfmsUmDSLyCS647TNLnjltQLx_wBO08gggYSkEavsWs9klWyqNF7_Yil8GNKnCtOV1EDJfJBUW7k3bDb9KkNtqVRgmQLBdrQUQKQHeKyR9ufIXasjsuF_uuwyRYZfEcRGu-4JaL1WDkbkBBQCCAebeZfCyxYHSa.IUZFTm6h9sgvk8enEW-7Lw \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info deleted file mode 100644 index 5284d5847b..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMS0xMS0yNCAxNDozMzoxOS40OTM2MzgxNDUgLTA1MDAgRVNUIG09KzAuMDYyMTA3NTAxIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiM3k0NDJvdU5PemotQmtaaCJ9.AypJs2QRWNAionV2YbpSj4iMUlXK6ve5AbW6dnqsKw_J_CHcjqWzig.WDpu5XC5HpWC43r4._f8eYLRJEFjyYTrAZS0N6dCyeEwpz-is9_1kjHmfWgVw5PYZxVrdI83S4t12Lh_jRhk2REPDVMbiG5-E0dEXNuV6o1tymAWZT1Vkw9EbzS-J51hzeDt4b-SvIe7XUJqwE67smbK22_o6dW8AcEd10p96jIqzO9jjoJQ_KXkWmYKgOv561K40wLew8thC3bdyMpwB50z0aywSTPHmfzBcI_ugni_RyIqtvLREraDHBcTAq9gwQTESKDbhtmOdvSUez9GBHhbHHf-95qz0d7VnuJXTg4h6dOwo2IF5dK5KENw9Ry_61nTvHTV3-tkh826gWWg4MkXjZGdQ5H-a0age-wN8N8oiJeIiE6WZXrHL9UpdwGX8.d7VnQTd1I5Ct5IcwkXbYEg \ No newline at end of file diff --git a/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json deleted file mode 100644 index 44a4a4ab38..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos139lr5vxkr94lf85nr4zc4dvmj52qmmn0er2hrf","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AquM1jLXtEDMVXLMvXr8Ny/lDySZ8GbJdQemhfRewpnA\"}","mnemonic":"cousin group crucial ready save drama piece time ordinary language one mind begin sound clever express broom limb foil size choose salute lazy gold"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json deleted file mode 100644 index 6c2ad92777..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos1y4e9e3srnuqcul7839cmttc4rjct2gece5y34l","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A8c8vRcad30nDg3JJV8Iczt5Jz+wmaJYAwHJ3R18whn+\"}","mnemonic":"horn wait grace stool course spot one inject gloom fat crime usual horn kitten canoe glove foam cake reopen oblige silver favorite uniform cry"} diff --git a/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json b/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json 
deleted file mode 100644 index 13fe0a134c..0000000000 --- a/ci/chains/gaia/v6.0.0/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1zx9v7cdvltsgyzymc6wwd070nzje4duvkzmfem","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AobtY30Q9FSuaB83i0DaK8gMU89R5BF9D5EiQZxltYj9\"}","mnemonic":"latin trial chronic shadow sugar raw clay village face hurry world melody choose escape post angry casino scale unit emotion beyond cheese shiver addict"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json b/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json deleted file mode 100644 index 06fb719cd9..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/addrbook.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "key": "f79fd5952365514d9426b513", - "addrs": [] -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml deleted file mode 100644 index 7585d6e7ca..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/app.toml +++ /dev/null @@ -1,196 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. 
archiving node) -# everything: all saved states will be deleted, storing only the current and previous state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. -pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. 
If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -# IavlCacheSize set the size of the iavl tree cache. -# Default cache size is 50mb. -iavl-cache-size = 781250 - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). 
-rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). -rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = false - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 1000 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 10 diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml b/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml deleted file mode 100644 index 473abd0295..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/config.toml +++ /dev/null @@ -1,428 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-0" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. 
If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behaviour. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. 
-tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, 
in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). 
-chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json b/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json deleted file mode 100644 index 70889fa4a4..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/genesis.json +++ /dev/null @@ -1,375 +0,0 @@ -{ - "genesis_time": "2022-05-06T10:04:49.897046Z", - "chain_id": "ibc-0", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "coins": [ - { - "denom": "stake", - "amount": 
"100000000000" - } - ] - }, - { - "address": "cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-0", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw", - "validator_address": "cosmosvaloper1sphll46wcyjw46wsfpf5qgnsjhm826ggerpk5a", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "3494ad7d61f006035641dbcd204dcbdb25904c2b@192.168.1.80:26656", - "timeout_height": 
"0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "dZpQRC0ydRoumK+s6NQUD+mu2BLxGpsvglEbl2QRMTlIfL2F47an/4lKswZ6dv+SBzKMvAYKzSzcj59lbRVDIA==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "interchainaccounts": { - "controller_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "ports": [], - "params": { - "controller_enabled": true - } - }, - "host_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "port": "icahost", - "params": { - "host_enabled": true, - "allow_messages": [] - } - } - }, - 
"liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - 
"upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json b/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json deleted file mode 100644 index c6bed88969..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/gentx/gentx-3494ad7d61f006035641dbcd204dcbdb25904c2b.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-0","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw","validator_address":"cosmosvaloper1sphll46wcyjw46wsfpf5qgnsjhm826ggerpk5a","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"3494ad7d61f006035641dbcd204dcbdb25904c2b@192.168.1.80:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["dZpQRC0ydRoumK+s6NQUD+mu2BLxGpsvglEbl2QRMTlIfL2F47an/4lKswZ6dv+SBzKMvAYKzSzcj59lbRVDIA=="]} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json b/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json deleted file mode 100644 index a68b2e4f43..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"IMD/3+Fiy+a+cJcCpn1YylTJhiqwCnRW7MrGJaxqxWtngftDoTFOGbEXGVsrTAUBG41JjsfjkOmLmUm2mDeYWw=="}} \ 
No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json b/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json deleted file mode 100644 index 1e40fbcbd1..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "5C8DC809794222F5F6AE922BB7B1763397F50CC2", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "DjvU3nA6MHikZs0esilo5NfNi7e3PpAhWtRK/HS8tEA=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "XOm68Zz6f0OCUw1BDUuD2bYP76mx12XBbaHaKxmimOMOO9TecDoweKRmzR6yKWjk182Lt7c+kCFa1Er8dLy0QA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address deleted file mode 100644 index a6366a58fd..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/14a8ece6445bc37c895cdfe8da8038501e667c70.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1My4xODkzODQgKzAyMDAgQ0VTVCBtPSswLjA1MjcyMzU0MCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Im96WmhEVnBJSXZ4RTNUN0YifQ.9TuQlMEndxJpH_l9rlzqmz6_xJMRU8JL1LwuUkgdfY_WtHb75oFQqA.zegihqG91bm2Q-Wu.dXK6_FULn11-n4PxiagGZVLt1rP6agaZTjT5j4PmyVuP3Rl6jW1Vti0mdnFz_ULtj7uXZAPOwC7iSmI_0Ta1DApaxSaTJFO1J2atgg0TrVpQQY-ifjD4duD8r_PWDahxicsbw1TwEWRWwBzrCsuGsEP-2oGIiAWZuBaU4DjbyLpEKkF0UByNy1jAcIkq9uZFht-1hWKqvug00JkEzBq2Oq96xTEy6gUNI3rD5kouDY-WNgUBF5M.3sXBzzHa6KCcqr3w1bUDMw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address deleted file mode 100644 index f5596e8975..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/806fffd74ec124eae9d0485340227095f6756908.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1MS4wMDA5MjkgKzAyMDAgQ0VTVCBtPSswLjA3MTg0NDEwMCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IlhvM2JXdXBNSXNScEI1QXgifQ.ndlbfgx_pi8Okmvhy4kxNASy15rGXUPgn25_FTiQrIhxrSdYOiCFGQ.efoaTYk4NOoLvJei.RnHvhAgBZpn_yOES2N8HNBpcK6buXZbDbR5HuYUOVQlr9i0CCZtpij7tlJ52FuOOxjj2qf2NPAqDFq2FMWlm6h4XT_Yd_X2O1uH9EaP2Hyb-FZsXAQDDmJm7Ns6qG4CNV5qXK37z_0GrCnhYxs8en7VxESRSFQBPsKN2cCZ6_pkAENfnK8hehekQsIi7G9ltTuqWXAkl-HL2AvX_kZK4uNkSJyQEAEgCAtm763gI-pDl7v7tLmIRudhI.geHt2gy-LGm-Ukyhp4ppIg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address deleted file mode 100644 index 17b4394e78..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/d4e446759163a216b80128e371ccb0ae91ddeb45.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1Mi4wOTg4OSArMDIwMCBDRVNUIG09KzAuMDYyMzE4Njg1IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiOE8zbUYtcDI1cG9VM2FOWCJ9.d6Pd5xSMIIzL2BGSu7eZ6XmETqXW9cNbhnO4q0d_QcFonORm9QrEIw.BoBvr9OIe60yDpR-.lAStGSI2Qlwa_DycEKRwVG2ruv1xb5TA0wcF_1XMdorszAu_6n9QwDkikWWA5btcUraaCD9IjJbGpMmp2QBW6mFymwaKngTb8yr4aHkObruhQq_Kl_bB7vckmFDpOzcOBcl_ImQSrFdf-pCFaofRkyley9K7cCSUxD8aLgqy9PDjCd7GEXfIo3NW4s0njxUqpfPMrZdOOz-8ncb7Qsq9NmV3FDk08ezgf0a9L_OJnd7Wuw.3Sdzjy4E62NhjVk-2p7x_w \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info deleted file mode 100644 index 46206e6ef1..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1Mi4wOTM1MjkgKzAyMDAgQ0VTVCBtPSswLjA1Njk1NzE3NiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6ImYwMHVnS2FKMmJxV2FyVXUifQ.ErunVpmVMSz4PXguTcg-JXH653Kh0Ay8DR9LsSFbBUYjXOpB5lgr1g.0nDxh8n9F7zdzCI6.MmP_lgaMBJbm0zvwlUjzSyVSJjZlK47yEsWkTgIA_L4PjhpSRXLic5vMF1pBreuI8FGYrV_9QGa_hpZHNFct3YtCGLKz5haw2lXb9O42U-NA5xP_lqC4tBJggaK2o6LB06iE8o-nHU6WqHVNLFsodsq7M0v17p8G4WkCvTqmQeRmWd7HJBfhJBDE7P4jtPfLiw8tX8hGGsMDCGFtalYeVfOpExhPSawjPZ3T7X20dMAmdZfFSjRCm-io540jlo6MoG30oTVpml2RjeYTQO98fqfUcSSDOmdUxQnkAj1ODYlbTrnAORWYp8lPK_ZxgKjmO0DlKruGDshegmLKeXSNBRF-1DdIEO8.jpzhxNdkgpheBstdI_BoLw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info deleted file mode 100644 index 8781cc1ced..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1My4xODQyMTcgKzAyMDAgQ0VTVCBtPSswLjA0NzU1NTkxMiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Img0VGJoNW1PaWh4Vi1xM0sifQ.ZFXGm6O4wc9k-Z4TT3d9oDnTo8tN0Jd9U64XXBjH0sQvTT-LEpqCCA.pGUBYNikHGTsV817.tsbXA6vDHeiVjoixvpusx2fbznnF34O8kPVTdO2iACNGheU9GFtqZEoC7NENSFyx9l8OlJ6rXNyuBP4G-v65qo2kHwR46vF5stIH_Vsi1uuBjxlzGdbM-i6xMA_hYiHizHTcKwFni6kLKUVm2e6tkg-pbWmekC9JG5wd65UcsbbX8onWWZimNHuI7zy18cw79oSjuOz7LcOJgyGFjLQd1eOf1WloJsqi1GCjpJtIGp60Fp0C4nBN6aQjvsZsY7KE5v6yxIW8jkKLRts0DliqrVJ45OrCBLi2xElu_xA9uUPpDklTtRE-im9uPAFY4fPGtkzZjHhlmef4GFh0j1hULBSwAHwcxp1o.ZhDB2WAdJnquIRiolF8btg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info b/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info deleted file mode 100644 index d9eda6ac5a..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNDo1MC45OTQ4NDEgKzAyMDAgQ0VTVCBtPSswLjA2NTc1NTg5NyIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6ImZLZXdhNERPVVZLb3dvY0cifQ.HOh51oKn3a-uakDKp5AzitIiHc7s1USNaDsl6DoCAZVcnA65tupmGQ.bZEWLUq4BQxGWHfX.Eltt1QI7XgQ-3jz5z1jXY-PlYgKJQxET2eNwknpQpUewCjNNlLbgiIlU4EG-35XlqC4j9beUirAIC-zHuD_Knzk78N2w88CHOKnyJQp4RbaDjZIftHTCtAXm--eAH3sDYWnHm2HGvV-pPRYmsy2aP2KQtBi9npBTvzAMHKcpdqZBVwkii4KzKSiD3OkIgiukceVvp_sMCKDIv3FfoxhIeqakop5LcZCjYkUywV6ArF8uWtWGC4Huy6fR8MkEC7K2G5hSVt6RMKFBfegdKwmZ43Owm0KnUoIZglNloZr-NBcdxAQ39QIPbNS2UpjfhdSAqi5yQcTWGDwC-6myHqwYb1Thgh9Uv1S-hGU0QNW7str6rdjy.-N7Jpvj3HlD6YvRz7kljHg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json b/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json deleted file mode 100644 index e0e81ee9bb..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1zj5weejyt0phez2uml5d4qpc2q0xvlrs47wc6c","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A7T4E+QhhEj1GkBDfiHEYcJZpptfnt9hSlSZMO1DBjky\"}","mnemonic":"amateur copy funny cabin thank aerobic despair actress exist sadness reopen science deny output extend huge motion slow blouse spoon salmon hollow spawn great"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json b/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json deleted file mode 100644 index 4f732a1532..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos16njyvav3vw3pdwqp9r3hrn9s46gam669pd09g9","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A/8Q8+2Evq0x+A4MmkkQI/eX1NwcmHmbRgAJTgEQSVU0\"}","mnemonic":"annual usage original ten orbit squirrel physical click hammer bomb inside bulb insane review creek record delay treat law evoke shoe cattle else certain"} diff --git a/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json 
b/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json deleted file mode 100644 index a2dadca940..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-0/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos1sphll46wcyjw46wsfpf5qgnsjhm826gguh4rcw","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A9JDjNFV9GL3nqTdRRlWjOZXO4MDSqVkcFsN0Q8zsWuC\"}","mnemonic":"brown dress cage biology skin magnet wire spread cream rookie aspect october neutral sea core panic thrive sample grow tool isolate squirrel mesh actress"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml deleted file mode 100644 index 7585d6e7ca..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/app.toml +++ /dev/null @@ -1,196 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Base Configuration ### -############################################################################### - -# The minimum gas prices a validator is willing to accept for processing a -# transaction. A transaction's fees must meet the minimum of any denomination -# specified in this config (e.g. 0.25token1;0.0001token2). -minimum-gas-prices = "" - -# default: the last 100 states are kept in addition to every 500th state; pruning at 10 block intervals -# nothing: all historic states will be saved, nothing will be deleted (i.e. archiving node) -# everything: all saved states will be deleted, storing only the current and previous state; pruning at 10 block intervals -# custom: allow pruning options to be manually specified through 'pruning-keep-recent', 'pruning-keep-every', and 'pruning-interval' -pruning = "default" - -# These are applied if and only if the pruning strategy is custom. 
-pruning-keep-recent = "0" -pruning-keep-every = "0" -pruning-interval = "0" - -# HaltHeight contains a non-zero block height at which a node will gracefully -# halt and shutdown that can be used to assist upgrades and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-height = 0 - -# HaltTime contains a non-zero minimum block time (in Unix seconds) at which -# a node will gracefully halt and shutdown that can be used to assist upgrades -# and testing. -# -# Note: Commitment of state will be attempted on the corresponding block. -halt-time = 0 - -# MinRetainBlocks defines the minimum block height offset from the current -# block being committed, such that all blocks past this offset are pruned -# from Tendermint. It is used as part of the process of determining the -# ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates -# that no blocks should be pruned. -# -# This configuration value is only responsible for pruning Tendermint blocks. -# It has no bearing on application state pruning which is determined by the -# "pruning-*" configurations. -# -# Note: Tendermint block pruning is dependant on this parameter in conunction -# with the unbonding (safety threshold) period, state pruning and state sync -# snapshot parameters to determine the correct minimum value of -# ResponseCommit.RetainHeight. -min-retain-blocks = 0 - -# InterBlockCache enables inter-block caching. -inter-block-cache = true - -# IndexEvents defines the set of events in the form {eventType}.{attributeKey}, -# which informs Tendermint what to index. If empty, all events will be indexed. -# -# Example: -# ["message.sender", "message.recipient"] -index-events = [] - -# IavlCacheSize set the size of the iavl tree cache. -# Default cache size is 50mb. 
-iavl-cache-size = 781250 - -############################################################################### -### Telemetry Configuration ### -############################################################################### - -[telemetry] - -# Prefixed with keys to separate services. -service-name = "" - -# Enabled enables the application telemetry functionality. When enabled, -# an in-memory sink is also enabled by default. Operators may also enabled -# other sinks such as Prometheus. -enabled = false - -# Enable prefixing gauge values with hostname. -enable-hostname = false - -# Enable adding hostname to labels. -enable-hostname-label = false - -# Enable adding service to labels. -enable-service-label = false - -# PrometheusRetentionTime, when positive, enables a Prometheus metrics sink. -prometheus-retention-time = 0 - -# GlobalLabels defines a global set of name/value label tuples applied to all -# metrics emitted using the wrapper functions defined in telemetry package. -# -# Example: -# [["chain_id", "cosmoshub-1"]] -global-labels = [ -] - -############################################################################### -### API Configuration ### -############################################################################### - -[api] - -# Enable defines if the API server should be enabled. -enable = false - -# Swagger defines if swagger documentation should automatically be registered. -swagger = false - -# Address defines the API server to listen on. -address = "tcp://0.0.0.0:1317" - -# MaxOpenConnections defines the number of maximum open connections. -max-open-connections = 1000 - -# RPCReadTimeout defines the Tendermint RPC read timeout (in seconds). -rpc-read-timeout = 10 - -# RPCWriteTimeout defines the Tendermint RPC write timeout (in seconds). -rpc-write-timeout = 0 - -# RPCMaxBodyBytes defines the Tendermint maximum response body (in bytes). 
-rpc-max-body-bytes = 1000000 - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). -enabled-unsafe-cors = false - -############################################################################### -### Rosetta Configuration ### -############################################################################### - -[rosetta] - -# Enable defines if the Rosetta API server should be enabled. -enable = false - -# Address defines the Rosetta API server to listen on. -address = ":8080" - -# Network defines the name of the blockchain that will be returned by Rosetta. -blockchain = "app" - -# Network defines the name of the network that will be returned by Rosetta. -network = "network" - -# Retries defines the number of retries when connecting to the node before failing. -retries = 3 - -# Offline defines if Rosetta server should run in offline mode. -offline = false - -############################################################################### -### gRPC Configuration ### -############################################################################### - -[grpc] - -# Enable defines if the gRPC server should be enabled. -enable = true - -# Address defines the gRPC server address to bind to. -address = "0.0.0.0:9090" - -############################################################################### -### gRPC Web Configuration ### -############################################################################### - -[grpc-web] - -# GRPCWebEnable defines if the gRPC-web should be enabled. -# NOTE: gRPC must also be enabled, otherwise, this configuration is a no-op. -enable = false - -# Address defines the gRPC-web server address to bind to. -address = "0.0.0.0:9091" - -# EnableUnsafeCORS defines if CORS should be enabled (unsafe - use it at your own risk). 
-enable-unsafe-cors = false - -############################################################################### -### State Sync Configuration ### -############################################################################### - -# State sync snapshots allow other nodes to rapidly join the network without replaying historical -# blocks, instead downloading and applying a snapshot of the application state at a given height. -[state-sync] - -# snapshot-interval specifies the block interval at which local state sync snapshots are -# taken (0 to disable). Must be a multiple of pruning-keep-every. -snapshot-interval = 1000 - -# snapshot-keep-recent specifies the number of recent snapshots to keep and serve (0 to keep all). -snapshot-keep-recent = 10 diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml deleted file mode 100644 index 222695a3f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/client.toml +++ /dev/null @@ -1,17 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -############################################################################### -### Client Configuration ### -############################################################################### - -# The network chain ID -chain-id = "" -# The keyring's backend, where the keys are stored (os|file|kwallet|pass|test|memory) -keyring-backend = "os" -# CLI output format (text|json) -output = "text" -# : to Tendermint RPC interface for this chain -node = "tcp://localhost:26657" -# Transaction broadcasting mode (sync|async|block) -broadcast-mode = "sync" diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml b/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml deleted file mode 100644 index b2c4196901..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/config.toml +++ /dev/null @@ -1,428 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or -# relative to the home directory (e.g. "data"). The home directory is -# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable -# or --home cmd flag. - -####################################################################### -### Main Base Config Options ### -####################################################################### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "ibc-1" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb -# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) -# - pure go -# - stable -# * cleveldb (uses levigo wrapper) -# - fast -# - requires gcc -# - use cleveldb build tag (go build -tags cleveldb) -# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) -# - EXPERIMENTAL -# - may be faster is some use-cases (random reads - indexer) -# - use boltdb build tag (go build -tags boltdb) -# * rocksdb (uses github.com/tecbot/gorocksdb) -# - EXPERIMENTAL -# - requires gcc -# - use rocksdb build tag (go build -tags rocksdb) -# * badgerdb (uses github.com/dgraph-io/badger) -# - EXPERIMENTAL -# - use badgerdb build tag (go build -tags badgerdb) -db_backend = "goleveldb" - -# Database directory -db_dir = "data" - -# Output level for logging, including package level options -log_level = "info" - -# Output format: 'plain' (colored text) or 'json' -log_format = "plain" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator 
set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_key_file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv_validator_state_file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv_validator_laddr = "" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - - -####################################################################### -### Advanced Configuration Options ### -####################################################################### - -####################################################### -### RPC Server Configuration Options ### -####################################################### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# A list of origins a cross-domain request can be executed from -# Default value '[]' disables cors support -# Use '["*"]' to allow any origin -cors_allowed_origins = [] - -# A list of methods the client is allowed to use with cross-domain requests -cors_allowed_methods = ["HEAD", "GET", "POST", ] - -# A list of non simple headers the client is allowed to use with cross-domain requests -cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Maximum number of simultaneous 
connections. -# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -grpc_max_open_connections = 900 - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -# Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc_max_open_connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -max_open_connections = 900 - -# Maximum number of unique clientIDs that can /subscribe -# If you're using /broadcast_tx_commit, set to the estimated maximum number -# of broadcast_tx_commit calls per block. -max_subscription_clients = 100 - -# Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. -max_subscriptions_per_client = 5 - -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental_subscription_buffer_size = 200 - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. 
If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). -# -# Must be at least the same as "experimental_subscription_buffer_size", -# otherwise connections could be dropped unnecessarily. This value should -# ideally be somewhat higher than "experimental_subscription_buffer_size" to -# accommodate non-subscription-related RPC responses. -experimental_websocket_write_buffer_size = 200 - -# If a WebSocket client cannot read fast enough, at present we may -# silently drop events instead of generating an error or disconnecting the -# client. -# -# Enabling this experimental parameter will cause the WebSocket connection to -# be closed instead if it cannot read fast enough, allowing for greater -# predictability in subscription behaviour. -experimental_close_on_slow_client = false - -# How long to wait for a tx to be committed during /broadcast_tx_commit. -# WARNING: Using a value larger than 10s will result in increasing the -# global HTTP write timeout, which applies to all connections and endpoints. -# See https://github.com/tendermint/tendermint/issues/3435 -timeout_broadcast_tx_commit = "10s" - -# Maximum size of request body, in bytes -max_body_bytes = 1000000 - -# Maximum size of request header, in bytes -max_header_bytes = 1048576 - -# The path to a file containing certificate that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# If the certificate is signed by a certificate authority, -# the certFile should be the concatenation of the server's certificate, any intermediates, -# and the CA's certificate. -# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. 
-tls_cert_file = "" - -# The path to a file containing matching private key that is used to create the HTTPS server. -# Might be either absolute path or path related to Tendermint's config directory. -# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. -# Otherwise, HTTP server is run. -tls_key_file = "" - -# pprof listen address (https://golang.org/pkg/net/http/pprof) -pprof_laddr = "localhost:6060" - -####################################################### -### P2P Configuration Options ### -####################################################### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 -external_address = "" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -persistent_peers = "" - -# UPNP port forwarding -upnp = false - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -# Set false for private or local networks -addr_book_strict = true - -# Maximum number of inbound peers -max_num_inbound_peers = 40 - -# Maximum number of outbound peers to connect to, excluding persistent peers -max_num_outbound_peers = 10 - -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional_peer_ids = "" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent_peers_max_dial_period = "0s" - -# Time to wait before flushing messages out on the connection -flush_throttle_timeout = "100ms" - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, 
in bytes/second -send_rate = 5120000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 5120000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow_duplicate_ip = false - -# Peer connection configuration. -handshake_timeout = "20s" -dial_timeout = "3s" - -####################################################### -### Mempool Configuration Option ### -####################################################### -[mempool] - -recheck = true -broadcast = true -wal_dir = "" - -# Maximum number of transactions in the mempool -size = 5000 - -# Limit the total size of all txs in the mempool. -# This only accounts for raw transactions (e.g. given 1MB transactions and -# max_txs_bytes=5MB, mempool will only accept 5 transactions). -max_txs_bytes = 1073741824 - -# Size of the cache (used to filter transactions we saw earlier) in transactions -cache_size = 10000 - -# Do not remove invalid transactions from the cache (default: false) -# Set to true if it's not possible for any invalid transaction to become valid -# again in the future. -keep-invalid-txs-in-cache = false - -# Maximum size of a single transaction. -# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}. -max_tx_bytes = 1048576 - -# Maximum size of a batch of transactions to send to a peer -# Including space needed by encoding (one varint per transaction). 
-# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 -max_batch_bytes = 0 - -####################################################### -### State Sync Configuration Options ### -####################################################### -[statesync] -# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine -# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in -# the network to take and serve state machine snapshots. State sync is not attempted if the node -# has any local state (LastBlockHeight > 0). The node will have a truncated block history, -# starting from the height of the snapshot. -enable = false - -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. -rpc_servers = "" -trust_height = 0 -trust_hash = "" -trust_period = "168h0m0s" - -# Time to spend discovering snapshots before initiating a restore. -discovery_time = "15s" - -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. -temp_dir = "" - -# The timeout duration before re-requesting a chunk, possibly from a different -# peer (default: 1 minute). -chunk_request_timeout = "10s" - -# The number of concurrent chunk fetchers to run (default: 1). 
-chunk_fetchers = "4" - -####################################################### -### Fast Sync Configuration Connections ### -####################################################### -[fastsync] - -# Fast Sync version to use: -# 1) "v0" (default) - the legacy fast sync implementation -# 2) "v1" - refactor of v0 version for better testability -# 2) "v2" - complete redesign of v0, optimized for testability & readability -version = "v0" - -####################################################### -### Consensus Configuration Options ### -####################################################### -[consensus] - -wal_file = "data/cs.wal/wal" - -# How long we wait for a proposal block before prevoting nil -timeout_propose = "1s" -# How much timeout_propose increases with each round -timeout_propose_delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout_prevote = "1s" -# How much the timeout_prevote increases with each round -timeout_prevote_delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout_precommit = "1s" -# How much the timeout_precommit increases with each round -timeout_precommit_delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout_commit = "1s" - -# How many blocks to look back to check existence of the node's consensus votes before joining consensus -# When non-zero, the node will panic upon restart -# if the same consensus key was used to sign {double_sign_check_height} last blocks. -# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. 
-double_sign_check_height = 0 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# EmptyBlocks mode and possible interval between empty blocks -create_empty_blocks = true -create_empty_blocks_interval = "0s" - -# Reactor sleep duration parameters -peer_gossip_sleep_duration = "100ms" -peer_query_maj23_sleep_duration = "2s" - -####################################################### -### Transaction Indexer Configuration Options ### -####################################################### -[tx_index] - -# What indexer to use for transactions -# -# The application will set which txs to index. In some cases a node operator will be able -# to decide which txs to index based on configuration set in the application. -# -# Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. -indexer = "kv" - -####################################################### -### Instrumentation Configuration Options ### -####################################################### -[instrumentation] - -# When true, Prometheus metrics are served under /metrics on -# PrometheusListenAddr. -# Check out the documentation for the list of available metrics. -prometheus = false - -# Address to listen for Prometheus collector(s) connections -prometheus_listen_addr = ":26660" - -# Maximum number of simultaneous connections. -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. 
-max_open_connections = 3 - -# Instrumentation namespace -namespace = "tendermint" diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json b/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json deleted file mode 100644 index 1055a5a68d..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/genesis.json +++ /dev/null @@ -1,375 +0,0 @@ -{ - "genesis_time": "2022-05-06T10:05:03.049703Z", - "chain_id": "ibc-1", - "initial_height": "1", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_bytes": "1048576" - }, - "validator": { - "pub_key_types": [ - "ed25519" - ] - }, - "version": {} - }, - "app_hash": "", - "app_state": { - "auth": { - "params": { - "max_memo_characters": "256", - "tx_sig_limit": "7", - "tx_size_cost_per_byte": "10", - "sig_verify_cost_ed25519": "590", - "sig_verify_cost_secp256k1": "1000" - }, - "accounts": [ - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6", - "pub_key": null, - "account_number": "0", - "sequence": "0" - }, - { - "@type": "/cosmos.auth.v1beta1.BaseAccount", - "address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "pub_key": null, - "account_number": "0", - "sequence": "0" - } - ] - }, - "authz": { - "authorization": [] - }, - "bank": { - "params": { - "send_enabled": [], - "default_send_enabled": true - }, - "balances": [ - { - "address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "coins": [ - { - "denom": "stake", - "amount": "100000000000" - } - ] - }, - { - "address": "cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": 
"100000000000" - } - ] - }, - { - "address": "cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6", - "coins": [ - { - "denom": "samoleans", - "amount": "100000000000" - }, - { - "denom": "stake", - "amount": "100000000000" - } - ] - } - ], - "supply": [ - { - "denom": "samoleans", - "amount": "200000000000" - }, - { - "denom": "stake", - "amount": "300000000000" - } - ], - "denom_metadata": [] - }, - "capability": { - "index": "1", - "owners": [] - }, - "crisis": { - "constant_fee": { - "denom": "stake", - "amount": "1000" - } - }, - "distribution": { - "params": { - "community_tax": "0.020000000000000000", - "base_proposer_reward": "0.010000000000000000", - "bonus_proposer_reward": "0.040000000000000000", - "withdraw_addr_enabled": true - }, - "fee_pool": { - "community_pool": [] - }, - "delegator_withdraw_infos": [], - "previous_proposer": "", - "outstanding_rewards": [], - "validator_accumulated_commissions": [], - "validator_historical_rewards": [], - "validator_current_rewards": [], - "delegator_starting_infos": [], - "validator_slash_events": [] - }, - "evidence": { - "evidence": [] - }, - "feegrant": { - "allowances": [] - }, - "genutil": { - "gen_txs": [ - { - "body": { - "messages": [ - { - "@type": "/cosmos.staking.v1beta1.MsgCreateValidator", - "description": { - "moniker": "ibc-1", - "identity": "", - "website": "", - "security_contact": "", - "details": "" - }, - "commission": { - "rate": "0.100000000000000000", - "max_rate": "0.200000000000000000", - "max_change_rate": "0.010000000000000000" - }, - "min_self_delegation": "1", - "delegator_address": "cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087", - "validator_address": "cosmosvaloper12gs7gsx968sgy8fwnzf37veqj02rd6rmqwg6td", - "pubkey": { - "@type": "/cosmos.crypto.ed25519.PubKey", - "key": "RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ=" - }, - "value": { - "denom": "stake", - "amount": "100000000000" - } - } - ], - "memo": "56790e64fa1c26c79a5c03ae6432973f04e293cd@192.168.1.80:26656", - "timeout_height": 
"0", - "extension_options": [], - "non_critical_extension_options": [] - }, - "auth_info": { - "signer_infos": [ - { - "public_key": { - "@type": "/cosmos.crypto.secp256k1.PubKey", - "key": "AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/" - }, - "mode_info": { - "single": { - "mode": "SIGN_MODE_DIRECT" - } - }, - "sequence": "0" - } - ], - "fee": { - "amount": [], - "gas_limit": "200000", - "payer": "", - "granter": "" - } - }, - "signatures": [ - "2MAVzXWc/buNQlM+tEM0Xxtkfv8FgF34yk0fw1B1Prt17G7AoB7cPX33GwhlvVknNtSNCg8VsTDphYocXqCDvQ==" - ] - } - ] - }, - "gov": { - "starting_proposal_id": "1", - "deposits": [], - "votes": [], - "proposals": [], - "deposit_params": { - "min_deposit": [ - { - "denom": "stake", - "amount": "10000000" - } - ], - "max_deposit_period": "200s" - }, - "voting_params": { - "voting_period": "200s" - }, - "tally_params": { - "quorum": "0.334000000000000000", - "threshold": "0.500000000000000000", - "veto_threshold": "0.334000000000000000" - } - }, - "ibc": { - "client_genesis": { - "clients": [], - "clients_consensus": [], - "clients_metadata": [], - "params": { - "allowed_clients": [ - "06-solomachine", - "07-tendermint" - ] - }, - "create_localhost": false, - "next_client_sequence": "0" - }, - "connection_genesis": { - "connections": [], - "client_connection_paths": [], - "next_connection_sequence": "0", - "params": { - "max_expected_time_per_block": "30000000000" - } - }, - "channel_genesis": { - "channels": [], - "acknowledgements": [], - "commitments": [], - "receipts": [], - "send_sequences": [], - "recv_sequences": [], - "ack_sequences": [], - "next_channel_sequence": "0" - } - }, - "interchainaccounts": { - "controller_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "ports": [], - "params": { - "controller_enabled": true - } - }, - "host_genesis_state": { - "active_channels": [], - "interchain_accounts": [], - "port": "icahost", - "params": { - "host_enabled": true, - "allow_messages": [] - } - } - }, - 
"liquidity": { - "params": { - "pool_types": [ - { - "id": 1, - "name": "StandardLiquidityPool", - "min_reserve_coin_num": 2, - "max_reserve_coin_num": 2, - "description": "Standard liquidity pool with pool price function X/Y, ESPM constraint, and two kinds of reserve coins" - } - ], - "min_init_deposit_amount": "1000000", - "init_pool_coin_mint_amount": "1000000", - "max_reserve_coin_amount": "0", - "pool_creation_fee": [ - { - "denom": "stake", - "amount": "40000000" - } - ], - "swap_fee_rate": "0.003000000000000000", - "withdraw_fee_rate": "0.000000000000000000", - "max_order_amount_ratio": "0.100000000000000000", - "unit_batch_height": 1, - "circuit_breaker_enabled": false - }, - "pool_records": [] - }, - "mint": { - "minter": { - "inflation": "0.130000000000000000", - "annual_provisions": "0.000000000000000000" - }, - "params": { - "mint_denom": "stake", - "inflation_rate_change": "0.130000000000000000", - "inflation_max": "0.200000000000000000", - "inflation_min": "0.070000000000000000", - "goal_bonded": "0.670000000000000000", - "blocks_per_year": "6311520" - } - }, - "packetfowardmiddleware": { - "params": { - "fee_percentage": "0.000000000000000000" - } - }, - "params": null, - "slashing": { - "params": { - "signed_blocks_window": "100", - "min_signed_per_window": "0.500000000000000000", - "downtime_jail_duration": "600s", - "slash_fraction_double_sign": "0.050000000000000000", - "slash_fraction_downtime": "0.010000000000000000" - }, - "signing_infos": [], - "missed_blocks": [] - }, - "staking": { - "params": { - "unbonding_time": "1814400s", - "max_validators": 100, - "max_entries": 7, - "historical_entries": 10000, - "bond_denom": "stake" - }, - "last_total_power": "0", - "last_validator_powers": [], - "validators": [], - "delegations": [], - "unbonding_delegations": [], - "redelegations": [], - "exported": false - }, - "transfer": { - "port_id": "transfer", - "denom_traces": [], - "params": { - "send_enabled": true, - "receive_enabled": true - } - }, - 
"upgrade": {}, - "vesting": {} - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json b/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json deleted file mode 100644 index 61d9fd7d25..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/gentx/gentx-56790e64fa1c26c79a5c03ae6432973f04e293cd.json +++ /dev/null @@ -1 +0,0 @@ -{"body":{"messages":[{"@type":"/cosmos.staking.v1beta1.MsgCreateValidator","description":{"moniker":"ibc-1","identity":"","website":"","security_contact":"","details":""},"commission":{"rate":"0.100000000000000000","max_rate":"0.200000000000000000","max_change_rate":"0.010000000000000000"},"min_self_delegation":"1","delegator_address":"cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087","validator_address":"cosmosvaloper12gs7gsx968sgy8fwnzf37veqj02rd6rmqwg6td","pubkey":{"@type":"/cosmos.crypto.ed25519.PubKey","key":"RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ="},"value":{"denom":"stake","amount":"100000000000"}}],"memo":"56790e64fa1c26c79a5c03ae6432973f04e293cd@192.168.1.80:26656","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[{"public_key":{"@type":"/cosmos.crypto.secp256k1.PubKey","key":"AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/"},"mode_info":{"single":{"mode":"SIGN_MODE_DIRECT"}},"sequence":"0"}],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":["2MAVzXWc/buNQlM+tEM0Xxtkfv8FgF34yk0fw1B1Prt17G7AoB7cPX33GwhlvVknNtSNCg8VsTDphYocXqCDvQ=="]} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json b/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json deleted file mode 100644 index 3daaff2931..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"tendermint/PrivKeyEd25519","value":"c6g3gMTe4rl8L4rYw/LnIaCnYpNrXa+gG18Ob8ozBbWMh5p4zIqFZJGL2G1gkEGphTagNwScUOlI4a4DnurdEg=="}} \ 
No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json b/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json deleted file mode 100644 index 91b7267961..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/config/priv_validator_key.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "address": "39D793D7F075E3BFD3142828B88DF5B13A194B5A", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "RMhhlBtQSs1rDz3bVzuJarNZHNRjukKEF7r9qqCgARQ=" - }, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "g4brql1ymthMphMQd9txX8o9FmV6cccCl7B+oRTDaUREyGGUG1BKzWsPPdtXO4lqs1kc1GO6QoQXuv2qoKABFA==" - } -} \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address deleted file mode 100644 index 6b44695ff3..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/5221e440c5d1e0821d2e98931f332093d436e87b.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNC4xNTMyNTcgKzAyMDAgQ0VTVCBtPSswLjA2OTEyODQ2OCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6IlNBNHEydGtDVXB4X0Y3X08ifQ.adYJ-_sAjRtuxIyeySQTySat5Zp1ZEbrVVEqfA4pse0qwwIEtHeL6g.MQDEaLhHPF4jR5QT.Cca1U5asQ8JLJ2qgeZr5I2BZd5naAgc2DEWrniEJTDvoS-uFndSrlR-edtZJLfOEDZQ34oeGLKMEvy6AqZAUYojA2Zqu85Xl87wq9IOaiZ7JRMjTDPAx9AMS9ZYDal1p53DQC40Z9enfqWO36KGvNjpWRlzh3pHVwopZWKJp4clyqXDvC1tVALQ76RxUpVF9_iHcsULI2gscNovc9HqJhC3PFNISVx_eDMahK713SffzD8XDWE7-fcpv.PepmuUZxDNqe2HNvr1E0Fw \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address deleted file mode 100644 index 1a727853d5..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/d23ae61575324aafaca704a424074e01f2da5109.address +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNS4yODkyNDggKzAyMDAgQ0VTVCBtPSswLjA1MTg5NjMyNyIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6InE1UUJSSkpDQ3pDZmEtUmYifQ.rrONBnY5Aj8aT7dz0UXOGzyUQ66I1Ac2zP5PjKleSowQedk0FcdMuQ.gxMk313lDafB83M0.3iiecezUwejqiJxgO7ck6NMebrVHdmTiXhcM2So82p7Ln1z75l_oj-KpproRYYrMxr-dgxQHtld3-VgdPJpzvuKr3rdrIDi_zhMHHRzZxa1Wj8L3z3-r-npYAY1sivaRpgP8Q34_qZAjABTvE36DFbIUecORBhr-QohA4hggFszvBSHvUnzfly03QLJlTvWwtbXHDqtPISBVfoOxtzM0NZ1EYH211KZY83QYO4f6xCnZZA.vGjEm8-w40mu6EW_ii6D9g \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address deleted file mode 100644 index 07d547336e..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/dc4dabdbd3b361a2038cfb43067e349a805c6c0f.address +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNi40MjExNSArMDIwMCBDRVNUIG09KzAuMDUwOTg1MjA0IiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiZXMwemowdHJBekFIN3pheiJ9.vPUBN-P-3YbENnEEZ4oZpiQXKIdE9FuTmqy7FocumRXR47lTR2l_9g.TMUNiFS4n7qvTgYn.7eIywlD-vrfJrkahp9Kfz9xn8ShKKRAH8L-KYJRmNrfNfN0fVQC7tiEuo4tjQkYw20Umsxx_9wuEVi3xarjm4aERw-StCqp5VbOE0QQtI6Sr0Pemfpx4yVj5OvVZIpiw3ho4cDWH_kWCksnsjZMqOga8zkn4E07ZdpGiBWsdBowvK9E6mAyX2yqLLsPSvaTq2wP54eZbTfRyd5u8L3qSNzKb9fm1CvJ4O1E7vdBJD-sa_AmXf60.rQ1AfXNGhQunz5Q7LQWtpQ \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info deleted file mode 100644 index 4665ed2142..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNS4yNzU5MTcgKzAyMDAgQ0VTVCBtPSswLjAzODU2NTUxMiIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Iks5UlMxNlVXSUF5ZUxCbl8ifQ.YCQkyMtkR3SbDmhnJDA0MZi96fsLjtOz3DF1xHxglUQVbY9cMA9CdA.n4NJ4cDazjGjE80c._rusIrVHv3Ln2e7XamD9pzoC1nS1q0sOw2iAZZjA847Bp9ZglIUg32vMR4F60pQaN1xkYlOnyUDcCn9mD4H0GZCg079tyHyMIye-mhmISHnj1b6p01vzlh7C3c5vTzCVlAwZgtb3PcwNO9v-tD6DVE8NRzSnVsQeTdWrFqhwSUZuIFO5Xq-pdBhGvgm1yfwyEk7XEeUZREiQ3s8z14fapmXjHUnyvesCEBkBCfTEIjsimaltcDMkrXfF3gYxyZmWfWnFPFfj6OiPxoTRlHwejZt4sTIrrT6meWYyUndp8aAVVM2YhZU0sB1brxn-RbuZx_AXDP6bZYVOD6yPht2OjQqLtXakumk.j-3-OZISZNQ64Q4wQYYVIA \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info deleted file mode 100644 index f3cb21a755..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/user2.info +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNi40MTAzMSArMDIwMCBDRVNUIG09KzAuMDQwMTQ1NDUzIiwiZW5jIjoiQTI1NkdDTSIsInAyYyI6ODE5MiwicDJzIjoiT3VHWU5ZcHlwOENoc2RRYSJ9.J_QPFA8vaBIXZTMcvPIEzSWQriqJCukc5Cw8nXx9r2-AqilX--tvfw.kRbNuGXPwl_5LYWu.q-WqlXg_1ohgqyILJraRhcu4Zv1T06t4oYCi0WxzF_ukhCZ9LkDcpi9NGFuDwxjlmXXXrFsJFoz2naY0ppHPQg74iS7DKw8hav1rv6JW1z_kPKTKVzvej4JdWmoFFe8mJXPyDp3-8PNcyX0gndyLV20MQ1LSIjjaUMeDaYXlitLU5uAAxHHARnFf-uBIkq94-Ud_tLFWbnbgvs8bp0S5M86apksT20UqcqiqgdYQKOduwajeunm76f_PPuJGetkLfkmfceLeTjATkYe7vnDFLWNKb7vg2bCeCTH1sbaMHEgKtvkD-qzuSf0w7QAq1Vhd0oXj2i5AebPsmoDN7VmZGu_IRVfYK1vr.fdQwhvoluadIf1mIi8qYBQ \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info b/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info deleted file mode 100644 index b15476ee06..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/keyring-test/validator.info +++ /dev/null @@ -1 +0,0 @@ 
-eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJjcmVhdGVkIjoiMjAyMi0wNS0wNiAxMjowNTowNC4xMzUzNDcgKzAyMDAgQ0VTVCBtPSswLjA1MTIxODA5OCIsImVuYyI6IkEyNTZHQ00iLCJwMmMiOjgxOTIsInAycyI6Ik1kcGhWd2RrRl9PMEw0SXUifQ.aWwkNEtb7SToBnDyxHdNx3Aj8L2RyOSbpGTPB2ZkEryPJGFJOz-hDA.CL-kA95E4_Sv12Gx.jMgUN0U13X85kSWHvUnS5wnPTty9ZMm-8Kos3dUxMBB1XTZFwPiFbehGjUgcKDtHaH3a-FYqtzeFvNTmL93hU7O2Gg_35WLbAGXGVlxMFe_t0gHUYc7nyn7lP6sElCgY-WBruyrLFFPe7TA5IISrHQGjXaOwUTK_DTDklDus75zvTtgusGEL3_J9Y3zRiFG3oE1cp8nN7yTnEHEt2ddOh46WmWpfbWrQDOvozaRkTr67iN2xsJ2dwhzJkeVUL1OVhlVfAfS6AmCm6OJjwf6k38CyR8s6FD6eQTd3kHfi_D4rD-vsca4cHc1poRfzT7qJqASv7CysA9W8DvJv4FgffhMu0rgzE3ihx880Kl01ol8w25ev.s6QGa1w6n9013h0w6e_UGg \ No newline at end of file diff --git a/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json b/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json deleted file mode 100644 index 17f84353e5..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/user2_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user2","type":"local","address":"cosmos1m3x6hk7nkds6yquvldpsvl35n2q9cmq0fumhh6","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"A/ufAZXN5V+SzW5b8o1bRW6EHofOmvW4h2lb49uG1k9o\"}","mnemonic":"tube castle all pistol animal liar news reform imitate tennis cup onion garbage glide circle fun benefit dutch worry title garbage record charge version"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json b/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json deleted file mode 100644 index b478d34db4..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/user_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"user","type":"local","address":"cosmos16gawv9t4xf92lt98qjjzgp6wq8ed55gf49wqef","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AsU2Pk5VDeJ+qmzGZqTDOyl/gZPQxRk2EuA1O6WFhXmD\"}","mnemonic":"gold eight fury outdoor sword catch unique eagle monster april worry layer dismiss cheese balcony bounce tribe simple tank cave radar fitness reward hundred"} diff --git a/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json 
b/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json deleted file mode 100644 index 1327e403f8..0000000000 --- a/ci/chains/gaia/v7.0.1/ibc-1/validator_seed.json +++ /dev/null @@ -1 +0,0 @@ -{"name":"validator","type":"local","address":"cosmos12gs7gsx968sgy8fwnzf37veqj02rd6rm96u087","pubkey":"{\"@type\":\"/cosmos.crypto.secp256k1.PubKey\",\"key\":\"AxsUINyw8wVnTzhmDJ3v6GBUPT7xQ+7yaf7PCm3Y2ic/\"}","mnemonic":"expire island increase truck party border elite exit effort layer spatial scout direct grace dream dry change hill impose nation smoke snow either pioneer"} diff --git a/ci/docker-compose-gaia-current.yml b/ci/docker-compose-gaia-current.yml deleted file mode 100644 index d162c6fa3c..0000000000 --- a/ci/docker-compose-gaia-current.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3' - -services: - - ibc-0: - container_name: ibc-0 - image: "informaldev/ibc-0:v7.0.1" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.10 - environment: - - CHAIN_ID=ibc-0 - - ibc-1: - container_name: ibc-1 - image: "informaldev/ibc-1:v7.0.1" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.11 - environment: - - CHAIN_ID=ibc-1 - - relayer: - depends_on: - - ibc-0 - - ibc-1 - container_name: relayer - stdin_open: true - tty: true - build: - context: ../ - dockerfile: ci/relayer.Dockerfile - args: - RELEASE: v7.0.1 - environment: - - CHAIN_A=ibc-0 - - CHAIN_A_HOME=/data/ibc-0 - - CHAIN_A_PORT=26657 - - CHAIN_B=ibc-1 - - CHAIN_B_HOME=/data/ibc-1 - - CHAIN_B_PORT=26657 - - CONFIG=simple_config.toml - - RELAYER_DIR=/relayer - - RELEASE=v7.0.1 - networks: - relaynet: - ipv4_address: 172.25.0.12 - -networks: - relaynet: - driver: bridge - ipam: - driver: default - config: - - subnet: 172.25.0.0/16 diff --git a/ci/docker-compose-gaia-legacy.yml b/ci/docker-compose-gaia-legacy.yml deleted file mode 100644 index c9743f7e24..0000000000 --- 
a/ci/docker-compose-gaia-legacy.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3' - -services: - - ibc-0: - container_name: ibc-0 - image: "informaldev/ibc-0:v6.0.0" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.10 - environment: - - CHAIN_ID=ibc-0 - - ibc-1: - container_name: ibc-1 - image: "informaldev/ibc-1:v6.0.0" - stdin_open: true - tty: true - entrypoint: "/chain/gaia/run-gaiad.sh" - networks: - relaynet: - ipv4_address: 172.25.0.11 - environment: - - CHAIN_ID=ibc-1 - - relayer: - depends_on: - - ibc-0 - - ibc-1 - container_name: relayer - stdin_open: true - tty: true - build: - context: ../ - dockerfile: ci/relayer.Dockerfile - args: - RELEASE: v6.0.0 - environment: - - CHAIN_A=ibc-0 - - CHAIN_A_HOME=/data/ibc-0 - - CHAIN_A_PORT=26657 - - CHAIN_B=ibc-1 - - CHAIN_B_HOME=/data/ibc-1 - - CHAIN_B_PORT=26657 - - CONFIG=simple_config.toml - - RELAYER_DIR=/relayer - - RELEASE=v6.0.0 - networks: - relaynet: - ipv4_address: 172.25.0.12 - -networks: - relaynet: - driver: bridge - ipam: - driver: default - config: - - subnet: 172.25.0.0/16 diff --git a/ci/e2e.sh b/ci/e2e.sh deleted file mode 100755 index cd8b26a387..0000000000 --- a/ci/e2e.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/sh - -set -e - -RELAYER_CMD=/usr/bin/hermes - -echo "=================================================================================================================" -echo " INITIALIZE " -echo "=================================================================================================================" -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Show config path" -echo "-----------------------------------------------------------------------------------------------------------------" -# Configuration file -CONFIG_PATH="$RELAYER_DIR"/"$CONFIG" -echo 
"-----------------------------------------------------------------------------------------------------------------" -echo "Show relayer version" -echo "-----------------------------------------------------------------------------------------------------------------" -echo Config: "$CONFIG_PATH" -$RELAYER_CMD -c "$CONFIG_PATH" version -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Setting up chains" -echo "-----------------------------------------------------------------------------------------------------------------" -echo " Chain:" "$CHAIN_A" -echo " creating chain store folder: "["$CHAIN_A_HOME"] -mkdir -p "$CHAIN_A_HOME" -echo " Chain:" "$CHAIN_B" ["$CHAIN_B_HOME"] -echo " creating chain store folder: "["$CHAIN_B_HOME"] -mkdir -p "$CHAIN_B_HOME" -echo Waiting 20 seconds for chains to generate blocks... -sleep 20 -echo "=================================================================================================================" -echo " CONFIGURATION " -echo "=================================================================================================================" -echo "-----------------------------------------------------------------------------------------------------------------" -echo "Add keys for chains" -echo "-----------------------------------------------------------------------------------------------------------------" -hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user_seed_"$CHAIN_A".json -hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user_seed_"$CHAIN_B".json -hermes -c "$CONFIG_PATH" keys add "$CHAIN_A" -f user2_seed_"$CHAIN_A".json -k user2 -hermes -c "$CONFIG_PATH" keys add "$CHAIN_B" -f user2_seed_"$CHAIN_B".json -k user2 - -echo "=================================================================================================================" -echo " END-TO-END TESTS " -echo 
"=================================================================================================================" - -python3 /relayer/e2e/run.py -c "$CONFIG_PATH" --cmd "$RELAYER_CMD" - diff --git a/ci/gaia.Dockerfile b/ci/gaia.Dockerfile deleted file mode 100644 index fce8641dc8..0000000000 --- a/ci/gaia.Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -################################################################################################### -# Build image -################################################################################################### -FROM golang:alpine AS build-env - -# Add dependencies -RUN apk add --no-cache curl make git libc-dev bash gcc linux-headers eudev-dev python3 - -# Gaia Branch or Release -ARG RELEASE - -# Clone repository -RUN git clone https://github.com/cosmos/gaia /go/src/github.com/cosmos/gaia - -# Set working directory for the build -WORKDIR /go/src/github.com/cosmos/gaia - -# Checkout branch -RUN git checkout $RELEASE - -# Install minimum necessary dependencies, build Cosmos SDK, remove packages -RUN apk add --no-cache $PACKAGES && \ - make install - -# Show version -RUN gaiad version --long - -################################################################################################### -# Final image -################################################################################################### -FROM alpine:edge -LABEL maintainer="hello@informal.systems" - -ARG RELEASE -ARG CHAIN -ARG NAME - -# Add jq for debugging -RUN apk add --no-cache jq curl tree - -WORKDIR /$NAME - -# Copy over binaries from the build-env -COPY --from=build-env /go/bin/gaiad /usr/bin/gaiad - -COPY --chown=root:root ./chains/$CHAIN/$RELEASE/$NAME /chain/$CHAIN - -# Copy entrypoint script -COPY ./run-gaiad.sh /chain/$CHAIN -RUN chmod 755 /chain/$CHAIN/run-gaiad.sh - -RUN tree -pug /chain - -ENTRYPOINT "/bin/sh" diff --git a/ci/hermes.Dockerfile b/ci/hermes.Dockerfile deleted file mode 100644 index cbee1188d6..0000000000 --- 
a/ci/hermes.Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# informalsystems/hermes image -# -# Used for running hermes in docker containers -# -# Usage: -# docker build . --build-arg TAG=v0.3.0 -t informalsystems/hermes:0.3.0 -f hermes.Dockerfile - -FROM rust:1.52-buster AS build-env - -ARG TAG -WORKDIR /root - -RUN git clone https://github.com/informalsystems/ibc-rs -RUN cd ibc-rs && git checkout $TAG && cargo build --release - - -FROM debian:buster-slim -LABEL maintainer="hello@informal.systems" - -RUN useradd -m hermes -s /bin/bash -WORKDIR /home/hermes -USER hermes:hermes -ENTRYPOINT ["/usr/bin/hermes"] - -COPY --chown=0:0 --from=build-env /usr/lib/x86_64-linux-gnu/libssl.so.1.1 /usr/lib/x86_64-linux-gnu/libssl.so.1.1 -COPY --chown=0:0 --from=build-env /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 /usr/lib/x86_64-linux-gnu/libcrypto.so.1.1 -COPY --chown=0:0 --from=build-env /root/ibc-rs/target/release/hermes /usr/bin/hermes -COPY --chown=0:0 --from=build-env /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt diff --git a/ci/no-std-check/.gitignore b/ci/no-std-check/.gitignore deleted file mode 100644 index 2f7896d1d1..0000000000 --- a/ci/no-std-check/.gitignore +++ /dev/null @@ -1 +0,0 @@ -target/ diff --git a/ci/no-std-check/Cargo.lock b/ci/no-std-check/Cargo.lock deleted file mode 100644 index 08f7e5024c..0000000000 --- a/ci/no-std-check/Cargo.lock +++ /dev/null @@ -1,2540 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - -[[package]] -name = "addr2line" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "ahash" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" -dependencies = [ - "getrandom 0.2.4", - "once_cell", - "version_check", -] - -[[package]] -name = "aho-corasick" -version = "0.7.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" -dependencies = [ - "memchr", -] - -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - -[[package]] -name = "anyhow" -version = "1.0.53" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" - -[[package]] -name = "arrayref" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" - -[[package]] -name = "arrayvec" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" -dependencies = [ - "nodrop", -] - -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - -[[package]] -name = "arrayvec" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" - -[[package]] -name = "async-trait" -version = "0.1.52" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "autocfg" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" - -[[package]] -name = "backtrace" -version = "0.3.64" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base58" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" - -[[package]] -name = "base64" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitvec" -version = "0.20.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - -[[package]] -name = "blake2-rfc" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" -dependencies = [ - "arrayvec 0.4.12", - "constant_time_eq", -] - -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - -[[package]] -name = "block-buffer" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" -dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.5", -] - -[[package]] -name = "block-buffer" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03588e54c62ae6d763e2a80090d50353b785795361b4ff5b3bf0a5097fc31c0b" -dependencies = [ - "generic-array 0.14.5", -] - -[[package]] -name = "block-padding" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -dependencies = [ - "byte-tools", -] - -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - -[[package]] -name = "bumpalo" -version = "3.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" - -[[package]] -name = "byte-slice-cast" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" - -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - -[[package]] -name = "byteorder" -version = "1.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" - -[[package]] -name = "bytes" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" - -[[package]] -name = "cc" -version = "1.0.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "winapi", -] - -[[package]] -name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "cpufeatures" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" -dependencies = [ - "libc", -] - -[[package]] -name = "crunchy" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" - -[[package]] -name = "crypto-common" 
-version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" -dependencies = [ - "generic-array 0.14.5", - "typenum", -] - -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array 0.14.5", - "subtle", -] - -[[package]] -name = "curve25519-dalek" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b85542f99a2dfa2a1b8e192662741c9859a846b296bef1c92ef9b58b5a216" -dependencies = [ - "byteorder", - "digest 0.8.1", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "curve25519-dalek" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" -dependencies = [ - "byteorder", - "digest 0.9.0", - "rand_core 0.5.1", - "subtle", - "zeroize", -] - -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - -[[package]] -name = "digest" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" -dependencies = [ - "generic-array 0.14.5", -] - -[[package]] -name = "digest" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" -dependencies = [ - "block-buffer 0.10.1", - "crypto-common", -] - -[[package]] -name = "downcast-rs" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" - -[[package]] -name = "dyn-clonable" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e9232f0e607a262ceb9bd5141a3dfb3e4db6994b31989bbfd845878cba59fd4" -dependencies = [ - "dyn-clonable-impl", - "dyn-clone", -] - -[[package]] -name = "dyn-clonable-impl" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "dyn-clone" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" - -[[package]] -name = "ed25519" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" -dependencies = [ - "signature", -] - -[[package]] -name = "ed25519-dalek" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c762bae6dcaf24c4c84667b8579785430908723d5c889f469d76a41d59cc7a9d" -dependencies = [ - "curve25519-dalek 3.2.0", - "ed25519", - "rand 0.7.3", - "serde", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "either" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" - -[[package]] -name = "environmental" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" - -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - -[[package]] -name = "fixed-hash" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.4", - "rustc-hex", - "static_assertions", -] - -[[package]] -name = "flex-error" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c606d892c9de11507fa0dcffc116434f94e105d0bbdc4e405b61519464c49d7b" -dependencies = [ - "paste", -] - -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - -[[package]] -name = "futures" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" - -[[package]] -name = "futures-executor" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", - "num_cpus", -] - -[[package]] -name = "futures-io" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" - -[[package]] -name = "futures-macro" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "futures-sink" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" - -[[package]] -name = "futures-task" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" - -[[package]] -name = "futures-util" -version = "0.3.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] - -[[package]] -name = "generic-array" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", - "wasm-bindgen", -] - -[[package]] -name = "getrandom" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.10.2+wasi-snapshot-preview1", -] - -[[package]] -name = "gimli" -version = "0.26.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" - -[[package]] -name = "hash-db" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" - -[[package]] -name = "hash256-std-hasher" -version = "0.15.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" -dependencies = [ - "crunchy", -] - -[[package]] -name = "hashbrown" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" -dependencies = [ - "ahash", -] - -[[package]] -name = "hashbrown" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" -dependencies = [ - "ahash", -] - -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - -[[package]] -name = "hex" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" - -[[package]] -name = "hmac" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" -dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", -] - -[[package]] -name = "hmac" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" -dependencies = [ - "crypto-mac 0.11.1", - "digest 0.9.0", -] - -[[package]] -name = "hmac-drbg" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" -dependencies = [ - "digest 0.9.0", - "generic-array 0.14.5", - "hmac 0.8.1", -] - -[[package]] -name = "ibc" -version = "0.15.0" -dependencies = [ - "bytes", - "derive_more", - "flex-error", - "ibc-proto", - "ics23", - "num-traits", - "prost", - "prost-types", - "safe-regex", - "serde", - "serde_derive", - "serde_json", - "sha2 0.10.2", - "subtle-encoding", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-proto", - "time", - "tracing", - "uint", -] - -[[package]] -name = "ibc-proto" -version = "0.18.0" -dependencies = [ - "base64", - "bytes", - "prost", - "prost-types", - "serde", - "tendermint-proto", -] - -[[package]] -name = "ics23" -version = "0.8.0-alpha" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a435f2471c1b2ce14771da465d47321c5905fac866d0effa9e0a3eb5d94fcf" -dependencies = [ - "anyhow", - "bytes", - "hex", - "prost", - "ripemd160", - "sha2 0.9.9", - "sha3", - "sp-std 3.0.0", -] - -[[package]] -name = "impl-codec" 
-version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-trait-for-tuples" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "integer-sqrt" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "276ec31bcb4a9ee45f58bec6f9ec700ae4cf4f4f8f2fa7e06cb406bd5ffdd770" -dependencies = [ - "num-traits", -] - -[[package]] -name = "itertools" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" - -[[package]] -name = "js-sys" -version = "0.3.56" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "keccak" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.117" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c" - -[[package]] -name = "libsecp256k1" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" -dependencies = [ - "arrayref", - "base64", - "digest 0.9.0", - "hmac-drbg", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.8.4", - "serde", - "sha2 0.9.9", - "typenum", -] - -[[package]] -name = "libsecp256k1-core" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" -dependencies = [ - "crunchy", - "digest 0.9.0", - "subtle", -] - -[[package]] -name = "libsecp256k1-gen-ecmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "libsecp256k1-gen-genmult" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" -dependencies = [ - "libsecp256k1-core", -] - -[[package]] -name = "lock_api" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" -dependencies = [ - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.14" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "matchers" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" -dependencies = [ - "regex-automata", -] - -[[package]] -name = "memchr" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" - -[[package]] -name = "memory-db" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d505169b746dacf02f7d14d8c80b34edfd8212159c63d23c977739a0d960c626" -dependencies = [ - "hash-db", - "hashbrown 0.11.2", - "parity-util-mem", -] - -[[package]] -name = "memory_units" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" - -[[package]] -name = "merlin" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e261cf0f8b3c42ded9f7d2bb59dea03aa52bc8a1cbc7482f9fc3fd1229d3b42" -dependencies = [ - "byteorder", - "keccak", - "rand_core 0.5.1", - "zeroize", -] - -[[package]] -name = "miniz_oxide" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" -dependencies = [ - "adler", - "autocfg", -] - -[[package]] -name = "no-std-check" -version = "0.1.0" -dependencies = [ - "ibc", - "ibc-proto", - "sp-core", - "sp-io", - "sp-runtime", - "sp-std 4.0.0", - "tendermint", - "tendermint-light-client-verifier", - "tendermint-proto", -] - -[[package]] -name = "nodrop" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" - -[[package]] -name = "num-bigint" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-derive" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "num-integer" -version = "0.1.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" -dependencies = [ - "autocfg", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" -dependencies = [ - "autocfg", - "num-bigint", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-traits" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ba99ba6393e2c3734791401b66902d981cb03bf190af674ca69949b6d5fb15" -dependencies = [ - "libc", -] - -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" - -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - -[[package]] -name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec 0.7.2", - "bitvec", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive", - "serde", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "parity-util-mem" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f4cb4e169446179cbc6b8b6320cc9fca49bd2e94e8db25f25f200a8ea774770" -dependencies = [ - "cfg-if", - "hashbrown 0.11.2", - "impl-trait-for-tuples", - "parity-util-mem-derive", - "parking_lot", - "primitive-types", - "winapi", -] - -[[package]] -name = "parity-util-mem-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" -dependencies = [ - "proc-macro2", - "syn", - "synstructure", -] - 
-[[package]] -name = "parity-wasm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" - -[[package]] -name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall", - "smallvec", - "winapi", -] - -[[package]] -name = "paste" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" - -[[package]] -name = "pbkdf2" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216eaa586a190f0a738f2f918511eecfa90f13295abec0e457cdebcceda80cbd" -dependencies = [ - "crypto-mac 0.8.0", -] - -[[package]] -name = "pbkdf2" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d95f5254224e617595d2cc3cc73ff0a5eaf2637519e25f03388154e9378b6ffa" -dependencies = [ - "crypto-mac 0.11.1", -] - -[[package]] -name = "pin-project-lite" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "ppv-lite86" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" - -[[package]] -name = "primitive-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash", - "impl-codec", - "impl-serde", - "scale-info", - "uint", -] - -[[package]] -name = "proc-macro-crate" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" -dependencies = [ - "thiserror", - "toml", -] - -[[package]] -name = "proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "prost" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a07b0857a71a8cb765763950499cae2413c3f9cede1133478c43600d9e146890" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-derive" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "prost-types" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" -dependencies = [ - "bytes", - "prost", -] - -[[package]] -name = "quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "864d3e96a899863136fc6e99f3d7cae289dafe43bf2c5ac19b70df7210c0a145" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "radium" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc 0.2.0", - "rand_pcg", -] - -[[package]] -name = "rand" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" -dependencies = [ - "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core 0.6.3", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", -] - -[[package]] -name = "rand_core" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" -dependencies = [ - "getrandom 0.2.4", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_hc" -version = "0.3.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - -[[package]] -name = "rand_pcg" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "redox_syscall" -version = "0.2.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" -dependencies = [ - "bitflags", -] - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "regex" -version = "1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.6.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" - -[[package]] -name = "ripemd160" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2eca4ecc81b7f313189bf73ce724400a07da2a6dac19588b03c8bd76a2dcc251" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" - -[[package]] -name = "rustc-hash" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" - -[[package]] -name = "rustc-hex" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" - -[[package]] -name = "ryu" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" - -[[package]] -name = "safe-proc-macro2" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "814c536dcd27acf03296c618dab7ad62d28e70abd7ba41d3f34a2ce707a2c666" -dependencies = [ - "unicode-xid", -] - -[[package]] -name = "safe-quote" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e530f7831f3feafcd5f1aae406ac205dd998436b4007c8e80f03eca78a88f7" -dependencies = [ - "safe-proc-macro2", -] - -[[package]] -name = "safe-regex" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15289bf322e0673d52756a18194167f2378ec1a15fe884af6e2d2cb934822b0" -dependencies = [ - "safe-regex-macro", -] - -[[package]] -name = "safe-regex-compiler" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba76fae590a2aa665279deb1f57b5098cbace01a0c5e60e262fcf55f7c51542" -dependencies = [ - "safe-proc-macro2", - "safe-quote", -] - -[[package]] -name = "safe-regex-macro" 
-version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c2e96b5c03f158d1b16ba79af515137795f4ad4e8de3f790518aae91f1d127" -dependencies = [ - "safe-proc-macro2", - "safe-regex-compiler", -] - -[[package]] -name = "scale-info" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55b744399c25532d63a0d2789b109df8d46fc93752d46b0782991a931a782f" -dependencies = [ - "bitvec", - "cfg-if", - "derive_more", - "parity-scale-codec", - "scale-info-derive", -] - -[[package]] -name = "scale-info-derive" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baeb2780690380592f86205aa4ee49815feb2acad8c2f59e6dd207148c3f1fcd" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "schnorrkel" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "curve25519-dalek 2.1.3", - "getrandom 0.1.16", - "merlin", - "rand 0.7.3", - "rand_core 0.5.1", - "sha2 0.8.2", - "subtle", - "zeroize", -] - -[[package]] -name = "scopeguard" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" - -[[package]] -name = "secrecy" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" -dependencies = [ - "zeroize", -] - -[[package]] -name = "serde" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_bytes" -version = "0.11.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ae07dd2f88a366f15bd0632ba725227018c69a1c8550a927324f8eb8368bb9" -dependencies = [ - "serde", -] - -[[package]] -name = "serde_derive" -version = "1.0.136" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "serde_json" -version = "1.0.78" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d23c1ba4cf0efd44be32017709280b32d1cea5c3f1275c3b6d9e8bc54f758085" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_repr" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98d0516900518c29efa217c298fa1f4e6c6ffc85ae29fd7f4ee48f176e1a9ed5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - -[[package]] -name = "sha2" -version = "0.9.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" -dependencies = [ - "block-buffer 0.9.0", - "cfg-if", - "cpufeatures", - "digest 0.9.0", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sha2" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest 0.10.3", -] - -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.0", -] - -[[package]] -name = "sharded-slab" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" -dependencies = [ - "lazy_static", -] - -[[package]] -name = "signature" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" - -[[package]] -name = "slab" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" - -[[package]] -name = "smallvec" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" - -[[package]] -name = "sp-application-crypto" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69c8dfffb7a15ac2c802bc9ce972b8449c8ce0a15254ae4d77a012a3a8c278d1" -dependencies = [ - "parity-scale-codec", - "scale-info", - "serde", - "sp-core", - "sp-io", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-arithmetic" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa92b9707afdaa807bcb985fcc70645ebbe6fbb2442620d61dc47e7f3553a7ae" -dependencies = [ - "integer-sqrt", - "num-traits", - "parity-scale-codec", - "scale-info", - "serde", - "sp-debug-derive", - "sp-std 4.0.0", - "static_assertions", -] - -[[package]] -name = "sp-core" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e2a372e6c3a40464f490f76843dfc331d08e43f7672d73377c0458f5b5db7ed" -dependencies = [ - "base58", - "bitflags", - "blake2-rfc", - "byteorder", - "dyn-clonable", - 
"ed25519-dalek", - "futures", - "hash-db", - "hash256-std-hasher", - "hex", - "impl-serde", - "lazy_static", - "libsecp256k1", - "log", - "merlin", - "num-traits", - "parity-scale-codec", - "parity-util-mem", - "parking_lot", - "primitive-types", - "rand 0.7.3", - "regex", - "scale-info", - "schnorrkel", - "secrecy", - "serde", - "sha2 0.10.2", - "sp-core-hashing", - "sp-debug-derive", - "sp-externalities", - "sp-runtime-interface", - "sp-std 4.0.0", - "sp-storage", - "ss58-registry", - "substrate-bip39", - "thiserror", - "tiny-bip39", - "tiny-keccak", - "twox-hash", - "wasmi", - "zeroize", -] - -[[package]] -name = "sp-core-hashing" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec864a6a67249f0c8dd3d5acab43623a61677e85ff4f2f9b04b802d2fe780e83" -dependencies = [ - "blake2-rfc", - "byteorder", - "sha2 0.9.9", - "sp-std 4.0.0", - "tiny-keccak", - "twox-hash", -] - -[[package]] -name = "sp-debug-derive" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d676664972e22a0796176e81e7bec41df461d1edf52090955cdab55f2c956ff2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-externalities" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54f0fe949490000a97f367dc2acf4d2e7d0473ec0e13ce1775d0804bb64d3c98" -dependencies = [ - "environmental", - "parity-scale-codec", - "sp-std 4.0.0", - "sp-storage", -] - -[[package]] -name = "sp-io" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dece853333a36d405be49d72a5190c1aa3fa6fbc442a8f28c59b50c0351b21d6" -dependencies = [ - "futures", - "hash-db", - "libsecp256k1", - "log", - "parity-scale-codec", - "parking_lot", - "sp-core", - "sp-externalities", - "sp-keystore", - "sp-runtime-interface", - "sp-state-machine", - "sp-std 4.0.0", - "sp-tracing", - "sp-trie", - "sp-wasm-interface", - "tracing", - "tracing-core", 
-] - -[[package]] -name = "sp-keystore" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3c3376dede1940014254f6d10c9fca20c4deb1fae930a98edfdab0190a06ed4" -dependencies = [ - "async-trait", - "futures", - "merlin", - "parity-scale-codec", - "parking_lot", - "schnorrkel", - "sp-core", - "sp-externalities", - "thiserror", -] - -[[package]] -name = "sp-panic-handler" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2101f3c555fceafcfcfb0e61c55ea9ed80dc60bd77d54d9f25b369edb029e9a4" -dependencies = [ - "backtrace", - "lazy_static", - "regex", -] - -[[package]] -name = "sp-runtime" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3772a765d076ac548c0c37f6d84dc8873bf64d0cfcf8b7b7322b5374d635ceff" -dependencies = [ - "either", - "hash256-std-hasher", - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "parity-util-mem", - "paste", - "rand 0.7.3", - "scale-info", - "serde", - "sp-application-crypto", - "sp-arithmetic", - "sp-core", - "sp-io", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-runtime-interface" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab1f6625996742e3e5c0e6a6a2b5c0db363e2c52f4a71ea6cd6d963a10a4bdd6" -dependencies = [ - "impl-trait-for-tuples", - "parity-scale-codec", - "primitive-types", - "sp-externalities", - "sp-runtime-interface-proc-macro", - "sp-std 4.0.0", - "sp-storage", - "sp-tracing", - "sp-wasm-interface", - "static_assertions", -] - -[[package]] -name = "sp-runtime-interface-proc-macro" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b58cc6060b2d2f35061db5b4172f4a47353c3f01a89f281699a6c3f05d1267a" -dependencies = [ - "Inflector", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "sp-state-machine" -version = "0.11.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e66851fd082cccc6f66dddf6eda458f30766a3febd769f1b5592120aa8072fd" -dependencies = [ - "hash-db", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot", - "rand 0.7.3", - "smallvec", - "sp-core", - "sp-externalities", - "sp-panic-handler", - "sp-std 4.0.0", - "sp-trie", - "thiserror", - "tracing", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-std" -version = "3.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35391ea974fa5ee869cb094d5b437688fbf3d8127d64d1b9fed5822a1ed39b12" - -[[package]] -name = "sp-std" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14804d6069ee7a388240b665f17908d98386ffb0b5d39f89a4099fc7a2a4c03f" - -[[package]] -name = "sp-storage" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "838ec2a757a8e7b903684a71f649dfbd6449d91e3e5d93979fc8d2ee77d56fee" -dependencies = [ - "impl-serde", - "parity-scale-codec", - "ref-cast", - "serde", - "sp-debug-derive", - "sp-std 4.0.0", -] - -[[package]] -name = "sp-tracing" -version = "4.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4688fceac497cee7e9b72c387fef20fa517e2bf6a3bf52a4a45dcc9391d6201" -dependencies = [ - "parity-scale-codec", - "sp-std 4.0.0", - "tracing", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sp-trie" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7ad44bbb93fb9af94ea9869ec13602884f7d2360c245377aed7c6c92c6834e" -dependencies = [ - "hash-db", - "memory-db", - "parity-scale-codec", - "scale-info", - "sp-core", - "sp-std 4.0.0", - "trie-db", - "trie-root", -] - -[[package]] -name = "sp-wasm-interface" -version = "5.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60951479e2f26018c4c315a6f48956b9e2a3803953517bf8930e69b9a7a159df" 
-dependencies = [ - "impl-trait-for-tuples", - "log", - "parity-scale-codec", - "sp-std 4.0.0", - "wasmi", -] - -[[package]] -name = "ss58-registry" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8319f44e20b42e5c11b88b1ad4130c35fe2974665a007b08b02322070177136a" -dependencies = [ - "Inflector", - "proc-macro2", - "quote", - "serde", - "serde_json", - "unicode-xid", -] - -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - -[[package]] -name = "substrate-bip39" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eee6965196b32f882dd2ee85a92b1dbead41b04e53907f269de3b0dc04733c" -dependencies = [ - "hmac 0.11.0", - "pbkdf2 0.8.0", - "schnorrkel", - "sha2 0.9.9", - "zeroize", -] - -[[package]] -name = "subtle" -version = "2.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" - -[[package]] -name = "subtle-encoding" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dcb1ed7b8330c5eed5441052651dd7a12c75e2ed88f2ec024ae1fa3a5e59945" -dependencies = [ - "zeroize", -] - -[[package]] -name = "syn" -version = "1.0.86" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "unicode-xid", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - -[[package]] -name = "tendermint" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ca881fa4dedd2b46334f13be7fbc8cc1549ba4be5a833fe4e73d1a1baaf7949" -dependencies = [ - "async-trait", - "bytes", - "ed25519", - "ed25519-dalek", - "flex-error", - "futures", - "num-traits", - "once_cell", - "prost", - "prost-types", - "serde", - "serde_bytes", - "serde_json", - "serde_repr", - "sha2 0.9.9", - "signature", - "subtle", - "subtle-encoding", - "tendermint-proto", - "time", - "zeroize", -] - -[[package]] -name = "tendermint-light-client-verifier" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae030a759b89cca84860d497d4d4e491615d8a9243cc04c61cd89335ba9b593" -dependencies = [ - "derive_more", - "flex-error", - "serde", - "tendermint", - "time", -] - -[[package]] -name = "tendermint-proto" -version = "0.23.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71f925d74903f4abbdc4af0110635a307b3cb05b175fdff4a7247c14a4d0874" -dependencies = [ - "bytes", - "flex-error", - "num-derive", - "num-traits", - "prost", - "prost-types", - "serde", - "serde_bytes", - "subtle-encoding", - "time", -] - -[[package]] -name = "thiserror" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "thread_local" -version = "1.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" -dependencies = [ - "once_cell", -] - -[[package]] -name = "time" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "004cbc98f30fa233c61a38bc77e96a9106e65c88f2d3bef182ae952027e5753d" -dependencies = [ - "libc", - "num_threads", - "time-macros", -] - -[[package]] -name = "time-macros" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25eb0ca3468fc0acc11828786797f6ef9aa1555e4a211a60d64cc8e4d1be47d6" - -[[package]] -name = "tiny-bip39" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" -dependencies = [ - "anyhow", - "hmac 0.8.1", - "once_cell", - "pbkdf2 0.4.0", - "rand 0.7.3", - "rustc-hash", - "sha2 0.9.9", - "thiserror", - "unicode-normalization", - "wasm-bindgen", - "zeroize", -] - -[[package]] -name = "tiny-keccak" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" -dependencies = [ - "crunchy", -] - -[[package]] -name = "tinyvec" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" - -[[package]] -name = "toml" -version = "0.5.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" -dependencies = [ - "serde", -] - -[[package]] -name = "tracing" -version = "0.1.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" -dependencies = [ - "cfg-if", - "pin-project-lite", - "tracing-attributes", - "tracing-core", -] - -[[package]] -name = "tracing-attributes" -version = "0.1.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "tracing-core" -version = "0.1.22" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03cfcb51380632a72d3111cb8d3447a8d908e577d31beeac006f836383d29a23" -dependencies = [ - "lazy_static", - "valuable", -] - -[[package]] -name = "tracing-log" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" -dependencies = [ - "lazy_static", - "log", - "tracing-core", -] - -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - -[[package]] -name = "tracing-subscriber" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" -dependencies = [ - "ansi_term", - "chrono", - "lazy_static", - "matchers", - "regex", - "serde", - "serde_json", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", - "tracing-serde", -] - -[[package]] -name = "trie-db" -version = "0.23.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" -dependencies = [ - "hash-db", - "hashbrown 0.12.0", - "log", - "rustc-hex", - "smallvec", -] - -[[package]] -name = "trie-root" -version = "0.17.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a36c5ca3911ed3c9a5416ee6c679042064b93fc637ded67e25f92e68d783891" -dependencies = [ - "hash-db", -] - -[[package]] -name = "twox-hash" -version = "1.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" -dependencies = [ - "cfg-if", - "rand 0.8.4", - "static_assertions", -] - -[[package]] -name = "typenum" -version = "1.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" - -[[package]] -name = "uint" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12f03af7ccf01dd611cc450a0d10dbc9b745770d096473e2faf0ca6e2d66d1e0" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - -[[package]] -name = "unicode-normalization" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "valuable" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - 
-[[package]] -name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" - -[[package]] -name = "wasm-bindgen" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" -dependencies = [ - "bumpalo", - "lazy_static", - "log", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.79" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" - -[[package]] -name = "wasmi" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca00c5147c319a8ec91ec1a0edbec31e566ce2c9cc93b3f9bb86a9efd0eb795d" -dependencies = [ - "downcast-rs", - "libc", - "memory_units", - "num-rational", - "num-traits", - "parity-wasm", - "wasmi-validation", -] - -[[package]] -name = "wasmi-validation" 
-version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165343ecd6c018fc09ebcae280752702c9a2ef3e6f8d02f1cfcbdb53ef6d7937" -dependencies = [ - "parity-wasm", -] - -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[package]] -name = "wyz" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" - -[[package]] -name = "zeroize" -version = "1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c88870063c39ee00ec285a2f8d6a966e5b6fb2becc4e8dac77ed0d370ed6006" -dependencies = [ - "zeroize_derive", -] - -[[package]] -name = "zeroize_derive" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] diff --git a/ci/no-std-check/Cargo.toml b/ci/no-std-check/Cargo.toml deleted file mode 100644 index b1567d863e..0000000000 --- a/ci/no-std-check/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "no-std-check" -version = "0.1.0" -edition = "2021" -resolver = "2" - -[dependencies] -ibc = { path = "../../modules", default-features = false } -ibc-proto = { 
path = "../../proto", default-features = false } -tendermint = { version = "0.23.7", default-features = false } -tendermint-proto = { version = "0.23.7", default-features = false } -tendermint-light-client-verifier = { version = "0.23.7", default-features = false } - -sp-core = { version = "5.0.0", default-features = false, optional = true } -sp-io = { version = "5.0.0", default-features = false, optional = true } -sp-runtime = { version = "5.0.0", default-features = false, optional = true } -sp-std = { version = "4.0.0", default-features = false, optional = true } - -[features] -panic-handler = [] -use-substrate = [ - "sp-core", - "sp-io", - "sp-runtime", - "sp-std", -] -substrate-std = [ - "sp-core/std", - "sp-io/std", - "sp-runtime/std", - "sp-std/std", -] - -# [patch.crates-io] -# tendermint = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-proto = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } -# tendermint-light-client-verifier = { git = "https://github.com/informalsystems/tendermint-rs", branch = "v0.23.x" } diff --git a/ci/no-std-check/Makefile b/ci/no-std-check/Makefile deleted file mode 100644 index b41cbf75f4..0000000000 --- a/ci/no-std-check/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -NIGHTLY_VERSION=nightly - -setup: - rustup install $(NIGHTLY_VERSION) - rustup target add wasm32-unknown-unknown --toolchain $(NIGHTLY_VERSION) - -build-substrate: - cargo build \ - --no-default-features \ - --features use-substrate,substrate-std - -check-panic-conflict: - cargo build \ - --no-default-features \ - --features panic-handler - -check-cargo-build-std: - rustup run $(NIGHTLY_VERSION) -- \ - cargo build -Z build-std=core,alloc \ - --no-default-features \ - --target x86_64-unknown-linux-gnu - -check-wasm: - rustup run $(NIGHTLY_VERSION) -- \ - cargo build \ - --features panic-handler \ - --target wasm32-unknown-unknown - -check-substrate: - rustup run $(NIGHTLY_VERSION) -- \ - cargo 
build \ - --no-default-features \ - --features use-substrate \ - --target wasm32-unknown-unknown diff --git a/ci/no-std-check/README.md b/ci/no-std-check/README.md deleted file mode 100644 index 16468ae76f..0000000000 --- a/ci/no-std-check/README.md +++ /dev/null @@ -1,191 +0,0 @@ -# `no_std` Compliance Check - -This crate checks the `no_std` compliance of the supported crates in ibc-rs. - -## Make Recipes - -- `check-panic-conflict` - Check for `no_std` compliance by installing a panic handler, and any other crate importing `std` will cause a conflict. Runs on default target. - -- `check-cargo-build-std` - Check for `no_std` compliance using Cargo nightly's `build-std` feature. Runs on the target `x86_64-unknown-linux-gnu`. - -- `check-wasm` - Check for WebAssembly and `no_std` compliance by building on the target `wasm32-unknown-unknown` and installing a panic handler. - -- `check-substrate` - Check for Substrate, WebAssembly, and `no_std` compliance by importing Substrate crates and building on `wasm32-unknown-unknown`. Any crate using `std` will cause a conflict on the panic and out-of-memory (OOM) handlers installed by `sp-io`. - -## Checking Single Unsupported Dependency - -By default, the check scripts try to build all unsupported dependencies and will fail. To test if a particular crate still fails the no_std check, edit the `use-unsupported` list in [Cargo.toml](./Cargo.toml) to uncomment all crates except the crate that we are interested to check. 
For example, to check for only the `getrandom` crate: - -```toml -use-unsupported = [ - # "tonic", - # "socket2", - "getrandom", - # "serde", - # ..., -] -``` - -## Adding New Dependencies - -For a crate named `my-package-1.2.3`, first try and add the crate in [Cargo.toml](./Cargo.toml) of this project as: - -```toml -my-package = { version = "1.2.3" } -``` - -Then comment out the `use-unsupported` list in the `[features]` section of Cargo.toml and replace it with an empty list temporarily for testing: - -```toml -[features] -... -use-unsupported = [] -# use-unsupported = [ -# "tonic", -# "socket2", -# "getrandom", -# ... -# ] -``` - -Then import the package in [src/lib.rs](./src/lib.rs): - -```rust -use my_package -``` - -Note that you must import the package in `lib.rs`, otherwise Cargo will skip linking the crate and fail to check for the panic handler conflicts. - -Then run all of the check scripts and see if any of them fails. If the check script fails, try and disable the default features and run the checks again: - -```rust -my-package = { version = "1.2.3", default-features = false } -``` - -You may also need other tweaks such as enable custom features to make it run on Wasm. -At this point if the checks pass, we have verified the no_std compliance of `my-package`. Restore the original `use-unsupported` list and commit the code. - -Otherwise if it fails, we have found a dependency that does not support `no_std`. 
Update Cargo.toml to make the crate optional: - -```rust -my-package = { version = "1.2.3", optional = true, default-features = false } -``` - -Now we have to modify [lib.rs](./src/lib.rs) again and only import the crate if it is enabled: - -```rust -#[cfg(feature = "my-package")] -use my_package; -``` - -Retore the original `use-unsupported` list, and add `my-package` to the end of the list: - -```toml -use-unsupported = [ - "tonic", - "socket2", - "getrandom", - ..., - "my-package", -] -``` - -Commit the changes so that we will track if newer version of the crate would support no_std in the future. - -## Conflict Detection Methods - -There are two methods that we use to detect `std` conflict: - -### Panic Handler Conflict - -This follows the outline of the guide by -[Danilo Bargen](https://blog.dbrgn.ch/2019/12/24/testing-for-no-std-compatibility/) -to register a panic handler in the `no-std-check` crate. -Any crate imported `no-std-check` that uses `std` will cause a compile error that -looks like follows: - -``` -$ cargo build - Updating crates.io index - Compiling no-std-check v0.1.0 (/data/development/informal/ibc-rs/no-std-check) -error[E0152]: found duplicate lang item `panic_impl` - --> src/lib.rs:31:1 - | -31 | fn panic(_info: &PanicInfo) -> ! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: the lang item is first defined in crate `std` (which `prost` depends on) - = note: first definition in `std` loaded from /home/ubuntu/.rustup/toolchains/stable-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libstd-b6b48477bfa8c673.rlib - = note: second definition in the local crate (`no_std_check`) - -error: aborting due to previous error - -For more information about this error, try `rustc --explain E0152`. -error: could not compile `no-std-check` -``` - -- Pros: - - Can be tested using Rust stable. -- Cons: - - Crates must be listed on both `Cargo.toml` and `lib.rs`. 
- - Crates that are listed in `Cargo.toml` but not imported inside `lib.rs` are not checked. - -### Overrride std crates using Cargo Nightly - -This uses the unstable `build-std` feature provided by -[Cargo Nightly](https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#build-std). -With this we can explicitly pass the std crates we want to support, `core` and `alloc`, -via command line, and exclude the `std` crate. - -If any of the dependency uses `std`, they will fail to compile at all, albeit with -confusing error messages: - -``` -$ rustup run nightly -- cargo build -j1 -Z build-std=core,alloc --target x86_64-unknown-linux-gnu - ... - Compiling bytes v1.0.1 -error[E0773]: attempted to define built-in macro more than once - --> /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/macros/mod.rs:1201:5 - | -1201 | / macro_rules! cfg { -1202 | | ($($cfg:tt)*) => { -1203 | | /* compiler built-in */ -1204 | | }; -1205 | | } - | |_____^ - | -note: previously defined here - --> /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/macros/mod.rs:1201:5 - | -1201 | / macro_rules! cfg { -1202 | | ($($cfg:tt)*) => { -1203 | | /* compiler built-in */ -1204 | | }; -1205 | | } - | |_____^ - -error: duplicate lang item in crate `core` (which `std` depends on): `bool`. - | - = note: the lang item is first defined in crate `core` (which `bytes` depends on) - = note: first definition in `core` loaded from /data/development/informal/ibc-rs/no-std-check/target/x86_64-unknown-linux-gnu/debug/deps/libcore-c00d94870d25cd7e.rmeta - = note: second definition in `core` loaded from /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcore-9924c22ae1efcf66.rlib - -error: duplicate lang item in crate `core` (which `std` depends on): `char`. 
- | - = note: the lang item is first defined in crate `core` (which `bytes` depends on) - = note: first definition in `core` loaded from /data/development/informal/ibc-rs/no-std-check/target/x86_64-unknown-linux-gnu/debug/deps/libcore-c00d94870d25cd7e.rmeta - = note: second definition in `core` loaded from /home/ubuntu/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/lib/rustlib/x86_64-unknown-linux-gnu/lib/libcore-9924c22ae1efcf66.rlib -... -``` - -The above error are shown when building the `bytes` crate. This is caused by `bytes` using imports from `std`, -which causes `std` to be included and produce conflicts with the `core` crate that is explicitly built by Cargo. -This produces very long error messages, so we may want to use tools like `less` to scroll through the errors. - -Pros: - - Directly identify use of `std` in dependencies. - - Error is raised on the first dependency that imports `std`. - -Cons: - - Nightly-only feature that is subject to change. - - Confusing and long error messages. diff --git a/ci/no-std-check/src/lib.rs b/ci/no-std-check/src/lib.rs deleted file mode 100644 index c45dffb897..0000000000 --- a/ci/no-std-check/src/lib.rs +++ /dev/null @@ -1,52 +0,0 @@ -// ensure_no_std/src/main.rs -#![no_std] -#![allow(unused_imports)] - -extern crate alloc; - -// Import the crates that we want to check if they are fully no-std compliance - -use ibc; -use ibc_proto; -use tendermint; -use tendermint_proto; -use tendermint_light_client_verifier; - -#[cfg(feature = "sp-core")] -use sp_core; - -#[cfg(feature = "sp-io")] -use sp_io; - -#[cfg(feature = "sp-runtime")] -use sp_runtime; - -#[cfg(feature = "sp-std")] -use sp_std; - -use core::panic::PanicInfo; - -/* - -This function definition checks for the compliance of no-std in -dependencies by causing a compile error if this crate is -linked with `std`. 
When that happens, you should see error messages -such as follows: - -``` -error[E0152]: found duplicate lang item `panic_impl` - --> no-std-check/src/lib.rs - | -12 | fn panic(_info: &PanicInfo) -> ! { - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - | - = note: the lang item is first defined in crate `std` (which `offending-crate` depends on) -``` - - */ -#[cfg(feature="panic-handler")] -#[panic_handler] -#[no_mangle] -fn panic(_info: &PanicInfo) -> ! { - loop {} -} diff --git a/ci/relayer.Dockerfile b/ci/relayer.Dockerfile deleted file mode 100644 index 191b8cc166..0000000000 --- a/ci/relayer.Dockerfile +++ /dev/null @@ -1,40 +0,0 @@ -##################################################### -#### Relayer image #### -##################################################### -FROM ubuntu:21.04 -LABEL maintainer="hello@informal.systems" - -ARG RELEASE - -# Add Python 3 and Ping and Telnet (for testing) -RUN apt-get update -y && apt-get install python3 python3-toml iputils-ping telnet -y - -# Copy relayer executable -COPY ./hermes /usr/bin/hermes - -# Relayer folder -WORKDIR /relayer - -# Copy configuration file -COPY ci/simple_config.toml . - -# Copy setup script -COPY ci/e2e.sh . 
- -# Copy end-to-end testing script -COPY e2e ./e2e - -# Copy key files -COPY ci/chains/gaia/$RELEASE/ibc-0/user_seed.json ./user_seed_ibc-0.json -RUN cat ./user_seed_ibc-0.json -COPY ci/chains/gaia/$RELEASE/ibc-1/user_seed.json ./user_seed_ibc-1.json -RUN cat ./user_seed_ibc-1.json -COPY ci/chains/gaia/$RELEASE/ibc-0/user2_seed.json ./user2_seed_ibc-0.json -RUN cat ./user2_seed_ibc-0.json -COPY ci/chains/gaia/$RELEASE/ibc-1/user2_seed.json ./user2_seed_ibc-1.json -RUN cat ./user2_seed_ibc-1.json - -# Make it executable -RUN chmod +x e2e.sh - -ENTRYPOINT ["/bin/sh"] diff --git a/ci/run-gaiad.sh b/ci/run-gaiad.sh deleted file mode 100755 index 78ac334b64..0000000000 --- a/ci/run-gaiad.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -gaiad start --home=/chain/gaia --grpc.address=$CHAIN_ID:9090 --pruning=nothing --log_level error 2>&1 | tee /chain/gaia/gaiad.log \ No newline at end of file diff --git a/ci/simd.Dockerfile b/ci/simd.Dockerfile deleted file mode 100644 index 2a235cfc5a..0000000000 --- a/ci/simd.Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM alpine -LABEL maintainer="hello@informal.systems" - -EXPOSE 26656 26657 26660 1317 - -ENTRYPOINT ["/usr/bin/simd"] - -CMD ["--home", "/root/.simapp", "start"] - -VOLUME [ "/root" ] - -#Commit ID: c2d40e1099d2c00c02f68bc156c57603640e3590 -COPY cosmos-sdk/build/simd /usr/bin/simd -COPY simapp/ /root/.simapp diff --git a/ci/simple_config.toml b/ci/simple_config.toml deleted file mode 100644 index 0eea92af7a..0000000000 --- a/ci/simple_config.toml +++ /dev/null @@ -1,60 +0,0 @@ -[global] -log_level = 'trace' - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[telemetry] -enabled = false -host = '127.0.0.1' -port = 3001 - -[[chains]] -id = 'ibc-0' -rpc_addr = 'http://ibc-0:26657' -grpc_addr = 'http://ibc-0:9090' 
-websocket_addr = 'ws://ibc-0:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 3000000 -max_msg_num = 30 -max_tx_size = 2097152 -gas_price = { price = 0.001, denom = 'stake' } -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } - -[[chains]] -id = 'ibc-1' -rpc_addr = 'http://ibc-1:26657' -grpc_addr = 'http://ibc-1:9090' -websocket_addr = 'ws://ibc-1:26657/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -max_gas = 3000000 -max_msg_num = 30 -max_tx_size = 2097152 -gas_price = { price = 0.001, denom = 'stake' } -clock_drift = '5s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index a48cba05d6..0000000000 --- a/codecov.yml +++ /dev/null @@ -1,23 +0,0 @@ -codecov: - require_ci_to_pass: yes - allow_coverage_offsets: true - -ignore: - -coverage: - precision: 1 - round: down - range: "70...100" - - status: - project: true - patch: true - changes: true - -parsers: - gcov: - branch_detection: - conditional: yes - loop: yes - method: yes - macro: no diff --git a/config.toml b/config.toml deleted file mode 100644 index 9e3749cdf8..0000000000 --- a/config.toml +++ /dev/null @@ -1,252 +0,0 @@ -# The global section has parameters that apply globally to the relayer operation. -[global] - -# Specify the verbosity for the relayer logging output. Default: 'info' -# Valid options are 'error', 'warn', 'info', 'debug', 'trace'. -log_level = 'info' - - -# Specify the mode to be used by the relayer. [Required] -[mode] - -# Specify the client mode. -[mode.clients] - -# Whether or not to enable the client workers. [Required] -enabled = true - -# Whether or not to enable periodic refresh of clients. [Default: true] -# This feature only applies to clients that underlie an open channel. 
-# For Tendermint clients, the frequency at which Hermes refreshes them is 2/3 of their -# trusting period (e.g., refresh every ~9 days if the trusting period is 14 days). -# Note: Even if this is disabled, clients will be refreshed automatically if -# there is activity on a connection or channel they are involved with. -refresh = true - -# Whether or not to enable misbehaviour detection for clients. [Default: false] -misbehaviour = true - -# Specify the connections mode. -[mode.connections] - -# Whether or not to enable the connection workers for handshake completion. [Required] -enabled = false - -# Specify the channels mode. -[mode.channels] - -# Whether or not to enable the channel workers for handshake completion. [Required] -enabled = false - -# Specify the packets mode. -[mode.packets] - -# Whether or not to enable the packet workers. [Required] -enabled = true - -# Parametrize the periodic packet clearing feature. -# Interval (in number of blocks) at which pending packets -# should be periodically cleared. A value of '0' will disable -# periodic packet clearing. [Default: 100] -clear_interval = 100 - -# Whether or not to clear packets on start. [Default: false] -clear_on_start = true - -# Toggle the transaction confirmation mechanism. -# The tx confirmation mechanism periodically queries the `/tx_search` RPC -# endpoint to check that previously-submitted transactions -# (to any chain in this config file) have been successfully delivered. -# If they have not been, and `clear_interval = 0`, then those packets are -# queued up for re-submission. -# Experimental feature. Affects telemetry if set to false. -# [Default: true] -tx_confirmation = true - -# The REST section defines parameters for Hermes' built-in RESTful API. -# https://hermes.informal.systems/rest.html -[rest] - -# Whether or not to enable the REST service. Default: false -enabled = true - -# Specify the IPv4/6 host over which the built-in HTTP server will serve the RESTful -# API requests. 
Default: 127.0.0.1 -host = '127.0.0.1' - -# Specify the port over which the built-in HTTP server will serve the restful API -# requests. Default: 3000 -port = 3000 - - -# The telemetry section defines parameters for Hermes' built-in telemetry capabilities. -# https://hermes.informal.systems/telemetry.html -[telemetry] - -# Whether or not to enable the telemetry service. Default: false -enabled = true - -# Specify the IPv4/6 host over which the built-in HTTP server will serve the metrics -# gathered by the telemetry service. Default: 127.0.0.1 -host = '127.0.0.1' - -# Specify the port over which the built-in HTTP server will serve the metrics gathered -# by the telemetry service. Default: 3001 -port = 3001 - - -# A chains section includes parameters related to a chain and the full node to which -# the relayer can send transactions and queries. -[[chains]] - -# Specify the chain ID. Required -id = 'ibc-0' - -# Specify the RPC address and port where the chain RPC server listens on. Required -rpc_addr = 'http://127.0.0.1:26657' - -# Specify the GRPC address and port where the chain GRPC server listens on. Required -grpc_addr = 'http://127.0.0.1:9090' - -# Specify the WebSocket address and port where the chain WebSocket server -# listens on. Required -websocket_addr = 'ws://127.0.0.1:26657/websocket' - -# Specify the maximum amount of time (duration) that the RPC requests should -# take before timing out. Default: 10s (10 seconds) -# Note: Hermes uses this parameter _only_ in `start` mode; for all other CLIs, -# Hermes uses a large preconfigured timeout (on the order of minutes). -rpc_timeout = '10s' - -# Specify the prefix used by the chain. Required -account_prefix = 'cosmos' - -# Specify the name of the private key to use for signing transactions. 
Required -# See the Adding Keys chapter for more information about managing signing keys: -# https://hermes.informal.systems/commands/keys/index.html#adding-keys -key_name = 'testkey' - -# Specify the address type which determines: -# 1) address derivation; -# 2) how to retrieve and decode accounts and pubkeys; -# 3) the message signing method. -# The current configuration options are for Cosmos SDK and Ethermint. -# -# Example configuration for chains based on Ethermint library: -# -# address_type = { derivation = 'ethermint', proto_type = { pk_type = '/ethermint.crypto.v1.ethsecp256k1.PubKey' } } -# -# Default: { derivation = 'cosmos' }, i.e. address derivation as in Cosmos SDK. -# Warning: This is an advanced feature! Modify with caution. -address_type = { derivation = 'cosmos' } - -# Specify the store prefix used by the on-chain IBC modules. Required -# Recommended value for Cosmos SDK: 'ibc' -store_prefix = 'ibc' - -# Specify the default amount of gas to be used in case the tx simulation fails, -# and Hermes cannot estimate the amount of gas needed. -# Default: 100 000 -default_gas = 100000 - -# Specify the maximum amount of gas to be used as the gas limit for a transaction. -# Default: 400 000 -max_gas = 400000 - -# Specify the price per gas used of the fee to submit a transaction and -# the denomination of the fee. Required -gas_price = { price = 0.001, denom = 'stake' } - -# Specify the ratio by which to increase the gas estimate used to compute the fee, -# to account for potential estimation error. Default: 0.1, ie. 10%. -# Valid range: 0.0 to 1.0 (inclusive) -gas_adjustment = 1.0 - -# Specify how many IBC messages at most to include in a single transaction. -# Default: 30 -max_msg_num = 30 - -# Specify the maximum size, in bytes, of each transaction that Hermes will submit. -# Default: 2097152 (2 MiB) -max_tx_size = 2097152 - -# Specify the maximum amount of time to tolerate a clock drift. 
-# The clock drift parameter defines how much new (untrusted) header's time -# can drift into the future. Default: 5s -clock_drift = '5s' - -# Specify the maximum time per block for this chain. -# The block time together with the clock drift are added to the source drift to estimate -# the maximum clock drift when creating a client on this chain. Default: 30s -# For cosmos-SDK chains a good approximation is `timeout_propose` + `timeout_commit` -# Note: This MUST be the same as the `max_expected_time_per_block` genesis parameter for Tendermint chains. -max_block_time = '30s' - -# Specify the amount of time to be used as the light client trusting period. -# It should be significantly less than the unbonding period -# (e.g. unbonding period = 3 weeks, trusting period = 2 weeks). -# Default: 2/3 of the `unbonding period` for Cosmos SDK chains -trusting_period = '14days' - -# Specify the trust threshold for the light client, ie. the maximum fraction of validators -# which have changed between two blocks. -# Default: { numerator = '1', denominator = '3' }, ie. 1/3. -# Warning: This is an advanced feature! Modify with caution. -trust_threshold = { numerator = '1', denominator = '3' } - -# Specify a string that Hermes will use as a memo for each transaction it submits -# to this chain. The string is limited to 50 characters. Default: '' (empty). -# Note: Hermes will append to the string defined here additional -# operational debugging information, e.g., relayer build version. -memo_prefix = '' - -# This section specifies the filters for policy based relaying. -# -# Default: no policy / filters, allow all packets on all channels. -# -# Only packet filtering based on channel identifier can be specified. -# A channel filter has two fields: -# 1. `policy` - one of two types are supported: -# - 'allow': permit relaying _only on_ the port/channel id in the list below, -# - 'deny': permit relaying on any channel _except for_ the list below. -# 2. 
`list` - the list of channels specified by the port and channel identifiers. -# Optionally, each element may also contains wildcards, for eg. 'ica*' -# to match all identifiers starting with 'ica' or '*' to match all identifiers. -# -# Example configuration of a channel filter, only allowing packet relaying on -# channel with port ID 'transfer' and channel ID 'channel-0', as well as on -# all ICA channels. -# -# [chains.packet_filter] -# policy = 'allow' -# list = [ -# ['ica*', '*'], -# ['transfer', 'channel-0'], -# ] - -# Specify that the transaction fees should be payed from this fee granter's account. -# Optional. If unspecified (the default behavior), then no fee granter is used, and -# the account specified in `key_name` will pay the tx fees for all transactions -# submitted to this chain. -# fee_granter = '' - -[[chains]] -id = 'ibc-1' -rpc_addr = 'http://127.0.0.1:26557' -grpc_addr = 'http://127.0.0.1:9091' -websocket_addr = 'ws://127.0.0.1:26557/websocket' -rpc_timeout = '10s' -account_prefix = 'cosmos' -key_name = 'testkey' -store_prefix = 'ibc' -default_gas = 100000 -max_gas = 400000 -gas_price = { price = 0.001, denom = 'stake' } -gas_adjustment = 0.1 -max_msg_num = 30 -max_tx_size = 2097152 -clock_drift = '5s' -max_block_time = '30s' -trusting_period = '14days' -trust_threshold = { numerator = '1', denominator = '3' } -address_type = { derivation = 'cosmos' } diff --git a/derive/Cargo.toml b/derive/Cargo.toml new file mode 100644 index 0000000000..b7c81019e0 --- /dev/null +++ b/derive/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "ibc-derive" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" + +[dependencies] +syn = { version = "1.0.99", features = ["extra-traits", "full", "derive", "parsing"] } +quote = "1.0.21" +proc-macro2 = "1.0.43" +convert_case = "0.6.0" +proc-macro-crate = "1.2.1" + +[lib] +proc-macro = true diff --git a/derive/src/client_def.rs b/derive/src/client_def.rs new file mode 100644 index 0000000000..cd047fde67 --- /dev/null 
+++ b/derive/src/client_def.rs @@ -0,0 +1,719 @@ +use crate::State; +use quote::quote; + +impl State { + fn impl_fn_verify_client_message(&self) -> proc_macro2::TokenStream { + let any_client_state = &self.any_data.client_state_ident; + let any_client_message = &self.any_data.client_message_ident; + let gen_params = &self.generics.params; + let error = &self.current_impl_error; + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(&client_state).to_owned(); + let (client_state, client_message) = #crate_::downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + client_message => #any_client_message::#variant_ident, + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_client_message::(client, ctx, client_id, client_state, client_message) + } + } + }); + + quote! 
{ + #[doc = "Validate an incoming client message"] + fn verify_client_message( + &self, + ctx: &Ctx, + client_id: #crate_::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + client_message: Self::ClientMessage, + ) -> ::core::result::Result<(), #error> + where + Ctx: #crate_::core::ics26_routing::context::ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_update_state(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let error = &self.current_impl_error; + let trait_ = &self.current_impl_trait; + let client_state_trait = &self.client_state_trait; + let any_client_state = &self.any_data.client_state_ident; + let any_client_message = &self.any_data.client_message_ident; + let gen_params = &self.generics.params; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(&client_state).to_owned(); + let (client_state, client_message) = #crate_::downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + client_message => #any_client_message::#variant_ident, + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + let (new_state, new_consensus) = + #trait_::update_state(client, ctx, client_id, client_state, client_message)?; + + Ok((Self::ClientState::#variant_ident(new_state), new_consensus)) + } + } + }); + + quote! 
{ + #[doc = "Validates an incoming `client_message` against the latest consensus state of this client."] + fn update_state( + &self, + ctx: &Ctx, + client_id: #crate_::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + client_message: Self::ClientMessage, + ) -> ::core::result::Result<(Self::ClientState, #crate_::core::ics02_client::client_def::ConsensusUpdateResult), #error> + where + Ctx: #crate_::core::ics26_routing::context::ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_update_state_on_misbehaviour(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let any_client_state = &self.any_data.client_state_ident; + let any_client_message = &self.any_data.client_message_ident; + let gen_params = &self.generics.params; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(&client_state).to_owned(); + let (client_state, client_message) = #crate_::downcast!( + client_state => #any_client_state::<#gen_params>::#variant_ident, + client_message => #any_client_message::#variant_ident, + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + let client_state = #trait_::update_state_on_misbehaviour(client, client_state, client_message)?; + Ok(Self::ClientState::#variant_ident(client_state)) + } + } + }); + + quote! 
{ + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + client_message: Self::ClientMessage, + ) -> ::core::result::Result { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_check_for_misbehaviour(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let error = &self.current_impl_error; + let trait_ = &self.current_impl_trait; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(&client_state).to_owned(); + let (client_state, client_message) = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident, + client_message => Self::ClientMessage::#variant_ident, + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + #trait_::check_for_misbehaviour(client, ctx, client_id, client_state, client_message) + } + } + }); + + quote! { + #[doc = "Checks for misbehaviour in an incoming client_message"] + fn check_for_misbehaviour( + &self, + ctx: &Ctx, + client_id: #crate_::core::ics24_host::identifier::ClientId, + client_state: Self::ClientState, + client_message: Self::ClientMessage, + ) -> ::core::result::Result + where + Ctx: #crate_::core::ics26_routing::context::ReaderContext, + { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_upgrade_and_update_state(&self) -> proc_macro2::TokenStream { + let error = &self.current_impl_error; + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! 
{ + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(old_client_state).to_owned(); + let (upgrade_client_state, upgrade_consensus_state) = #crate_::downcast!( + upgrade_client_state => Self::ClientState::#variant_ident, + upgrade_consensus_state => Self::ConsensusState::#variant_ident, + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type.clone()))?; + + let old_client_state = #crate_::downcast!( + old_client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + let (new_state, new_consensus) = #trait_::verify_upgrade_and_update_state::( + client, + ctx, + client_id, + old_client_state, + upgrade_client_state, + upgrade_consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + )?; + + Ok((Self::ClientState::#variant_ident(new_state), new_consensus)) + } + } + }); + + quote! { + fn verify_upgrade_and_update_state( + &self, + ctx: &Ctx, + client_id: #crate_::core::ics24_host::identifier::ClientId, + old_client_state: &Self::ClientState, + upgrade_client_state: &Self::ClientState, + upgrade_consensus_state: &Self::ConsensusState, + proof_upgrade_client: ::alloc::vec::Vec, + proof_upgrade_consensus_state: ::alloc::vec::Vec, + ) -> ::core::result::Result<(Self::ClientState, #crate_::core::ics02_client::client_def::ConsensusUpdateResult), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_client_consensus_state(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! 
{ + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_client_consensus_state( + client, + ctx, + client_state, + height, + prefix, + proof, + root, + client_id, + consensus_height, + expected_consensus_state, + ) + } + } + }); + + quote! { + fn verify_client_consensus_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + prefix: &#crate_::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + consensus_height: #crate_::core::ics02_client::height::Height, + expected_consensus_state: &Ctx::AnyConsensusState, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_connection_state(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!(client_state => Self::ClientState::#variant_ident) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_connection_state( + client, + ctx, + client_id, + client_state, + height, + prefix, + proof, + root, + connection_id, + expected_connection_end, + ) + } + } + }); + + quote! 
{ + fn verify_connection_state( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + prefix: &#crate_::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + connection_id: &#crate_::core::ics24_host::identifier::ConnectionId, + expected_connection_end: &#crate_::core::ics03_connection::connection::ConnectionEnd, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_channel_state(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!(client_state => Self::ClientState::#variant_ident) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_channel_state( + client, + ctx, + client_id, + client_state, + height, + prefix, + proof, + root, + port_id, + channel_id, + expected_channel_end, + ) + } + } + }); + + quote! 
{ + fn verify_channel_state( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + prefix: &#crate_::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &#crate_::core::ics24_host::identifier::PortId, + channel_id: &#crate_::core::ics24_host::identifier::ChannelId, + expected_channel_end: &#crate_::core::ics04_channel::channel::ChannelEnd, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_client_full_state(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_client_full_state( + client, + ctx, + client_state, + height, + prefix, + proof, + root, + client_id, + client_state_on_counterparty, + ) + } + } + }); + + quote! 
{ + fn verify_client_full_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + prefix: &#crate_::core::ics23_commitment::commitment::CommitmentPrefix, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state_on_counterparty: &Ctx::AnyClientState, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_data(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_packet_data( + client, + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + commitment, + ) + } + } + }); + + quote! 
{ + fn verify_packet_data( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + connection_end: &#crate_::core::ics03_connection::connection::ConnectionEnd, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &#crate_::core::ics24_host::identifier::PortId, + channel_id: &#crate_::core::ics24_host::identifier::ChannelId, + sequence: #crate_::core::ics04_channel::packet::Sequence, + commitment: #crate_::core::ics04_channel::commitment::PacketCommitment, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_acknowledgement(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_packet_acknowledgement( + client, + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ack_commitment, + ) + } + } + }); + + quote! 
{ + fn verify_packet_acknowledgement( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + connection_end: &#crate_::core::ics03_connection::connection::ConnectionEnd, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &#crate_::core::ics24_host::identifier::PortId, + channel_id: &#crate_::core::ics24_host::identifier::ChannelId, + sequence: #crate_::core::ics04_channel::packet::Sequence, + ack_commitment: #crate_::core::ics04_channel::commitment::AcknowledgementCommitment, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_next_sequence_recv(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_next_sequence_recv( + client, + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) + } + } + }); + + quote! 
{ + fn verify_next_sequence_recv( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + connection_end: &#crate_::core::ics03_connection::connection::ConnectionEnd, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &#crate_::core::ics24_host::identifier::PortId, + channel_id: &#crate_::core::ics24_host::identifier::ChannelId, + sequence: #crate_::core::ics04_channel::packet::Sequence, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_verify_packet_receipt_absence(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let trait_ = &self.current_impl_trait; + let error = &self.current_impl_error; + let client_state_trait = &self.client_state_trait; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let _client_state_path = &client.client_state_path; + quote! { + #(#attrs)* + Self::#variant_ident(client) => { + let client_type = #client_state_trait::client_type(client_state).to_owned(); + let client_state = #crate_::downcast!( + client_state => Self::ClientState::#variant_ident + ) + .ok_or_else(|| #error::client_args_type_mismatch(client_type))?; + + #trait_::verify_packet_receipt_absence( + client, + ctx, + client_id, + client_state, + height, + connection_end, + proof, + root, + port_id, + channel_id, + sequence, + ) + } + } + }); + + quote! 
{ + fn verify_packet_receipt_absence( + &self, + ctx: &Ctx, + client_id: &#crate_::core::ics24_host::identifier::ClientId, + client_state: &Self::ClientState, + height: #crate_::core::ics02_client::height::Height, + connection_end: &#crate_::core::ics03_connection::connection::ConnectionEnd, + proof: &#crate_::core::ics23_commitment::commitment::CommitmentProofBytes, + root: &#crate_::core::ics23_commitment::commitment::CommitmentRoot, + port_id: &#crate_::core::ics24_host::identifier::PortId, + channel_id: &#crate_::core::ics24_host::identifier::ChannelId, + sequence: #crate_::core::ics04_channel::packet::Sequence, + ) -> ::core::result::Result<(), #error> { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_client_def(&mut self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + self.current_impl_trait = + syn::parse2(quote! { #crate_::core::ics02_client::client_def::ClientDef }).unwrap(); + self.current_impl_error = + syn::parse2(quote! { #crate_::core::ics02_client::error::Error }).unwrap(); + + let this = &self.self_ident; + let any_client_message = &self.any_data.client_message_ident; + let any_client_state = &self.any_data.client_state_ident; + let any_consensus_state = &self.any_data.consensus_state_ident; + let client_def_trait = &self.current_impl_trait; + let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl(); + + let fn_verify_client_message = self.impl_fn_verify_client_message(); + let fn_update_state = self.impl_fn_update_state(); + let fn_update_state_on_misbehaviour = self.impl_fn_update_state_on_misbehaviour(); + let fn_check_for_misbehaviour = self.impl_fn_check_for_misbehaviour(); + let fn_verify_upgrade_and_update_state = self.impl_fn_verify_upgrade_and_update_state(); + let fn_verify_client_consensus_state = self.impl_fn_verify_client_consensus_state(); + let fn_verify_connection_state = self.impl_fn_verify_connection_state(); + let fn_verify_channel_state = self.impl_fn_verify_channel_state(); + 
let fn_verify_client_full_state = self.impl_fn_verify_client_full_state(); + let fn_verify_packet_data = self.impl_fn_verify_packet_data(); + let fn_verify_packet_acknowledgement = self.impl_fn_verify_packet_acknowledgement(); + let fn_verify_next_sequence_recv = self.impl_fn_verify_next_sequence_recv(); + let fn_verify_packet_receipt_absence = self.impl_fn_verify_packet_receipt_absence(); + + quote! { + impl #impl_generics #client_def_trait for #this #ty_generics #where_clause { + type ClientMessage = #any_client_message; + type ClientState = #any_client_state::<#ty_generics>; + type ConsensusState = #any_consensus_state; + + #fn_verify_client_message + #fn_update_state + #fn_update_state_on_misbehaviour + #fn_check_for_misbehaviour + #fn_verify_upgrade_and_update_state + #fn_verify_client_consensus_state + #fn_verify_connection_state + #fn_verify_channel_state + #fn_verify_client_full_state + #fn_verify_packet_data + #fn_verify_packet_acknowledgement + #fn_verify_next_sequence_recv + #fn_verify_packet_receipt_absence + } + } + } +} diff --git a/derive/src/client_messaage.rs b/derive/src/client_messaage.rs new file mode 100644 index 0000000000..67c6b0f482 --- /dev/null +++ b/derive/src/client_messaage.rs @@ -0,0 +1,51 @@ +use crate::State; + +use quote::quote; + +impl State { + pub fn impl_fn_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::height(state), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn height(&self) -> #crate_::core::ics02_client::height::Height { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_client_message(&mut self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let this = &self.self_ident; + self.current_impl_trait = + syn::parse2(quote! 
{ #crate_::core::ics02_client::client_message::ClientMessage }) + .unwrap(); + self.current_impl_error = + syn::parse2(quote! { #crate_::core::ics02_client::error::Error }).unwrap(); + let trait_ = &self.current_impl_trait; + + let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl(); + + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! { + impl #impl_generics #trait_ for #this #ty_generics #where_clause { + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/client_state.rs b/derive/src/client_state.rs new file mode 100644 index 0000000000..874cf66148 --- /dev/null +++ b/derive/src/client_state.rs @@ -0,0 +1,206 @@ +use crate::State; + +use quote::quote; + +impl State { + fn impl_fn_chain_id(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let trait_ = &self.current_impl_trait; + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::chain_id(state), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn chain_id(&self) -> #crate_::core::ics24_host::identifier::ChainId { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_fn_client_def(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => AnyClient::#variant_ident(#trait_::client_def(state)), + } + }); + + quote! 
{ + fn client_def(&self) -> Self::ClientDef { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_fn_client_type(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::client_type(state), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn client_type(&self) -> #crate_::core::ics02_client::client_state::ClientType { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_latest_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::latest_height(state), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn latest_height(&self) -> #crate_::core::ics02_client::height::Height { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_frozen_height(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::frozen_height(state), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn frozen_height(&self) -> ::core::option::Option<#crate_::core::ics02_client::height::Height> { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_upgrade(&self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! 
{ + #(#attrs)* + Self::#variant_ident(state) => { + let upgrade_options = #crate_::downcast!(upgrade_options => Self::UpgradeOptions::#variant_ident) + .expect(&format!("upgrade options should be {}", stringify!(#variant_ident))); + + Self::#variant_ident(#trait_::upgrade( + state, + upgrade_height, + upgrade_options, + chain_id, + )) + } + } + }); + + quote! { + fn upgrade( + self, + upgrade_height: #crate_::core::ics02_client::height::Height, + upgrade_options: Self::UpgradeOptions, + chain_id: #crate_::core::ics24_host::identifier::ChainId, + ) -> Self { + match self { + #(#cases)* + } + } + } + } + + fn impl_fn_expired(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(state) => #trait_::expired(state, elapsed), + } + }); + + quote! { + fn expired(&self, elapsed: ::core::time::Duration) -> bool { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_client_state(&mut self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + self.current_impl_trait = + syn::parse2(quote! { #crate_::core::ics02_client::client_state::ClientState }).unwrap(); + self.current_impl_error = + syn::parse2(quote! 
{ #crate_::core::ics02_client::error::Error }).unwrap(); + + let this = &self.self_ident; + let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl(); + + let fn_chain_id = self.impl_fn_chain_id(); + let fn_client_type = self.impl_fn_client_type(); + let fn_client_def = self.impl_fn_client_def(); + let fn_latest_height = self.impl_fn_latest_height(); + let fn_frozen_height = self.impl_fn_frozen_height(); + let fn_upgrade = self.impl_fn_upgrade(); + let fn_expired = self.impl_fn_expired(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + let current_impl_trait = &self.current_impl_trait; + + quote! { + impl #impl_generics #current_impl_trait for #this #ty_generics #where_clause { + type UpgradeOptions = AnyUpgradeOptions; // TODO: make variable? + type ClientDef = AnyClient #ty_generics; + + #fn_chain_id + #fn_client_type + #fn_client_def + #fn_latest_height + #fn_frozen_height + #fn_upgrade + #fn_expired + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/coercion.rs b/derive/src/coercion.rs new file mode 100644 index 0000000000..f82838fa66 --- /dev/null +++ b/derive/src/coercion.rs @@ -0,0 +1,53 @@ +use crate::State; +use quote::quote; + +impl State { + pub(crate) fn impl_fn_downcast(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + quote! { + #(#attrs)* + Self::#variant_ident(state) => state.downcast::(), + } + }); + + quote! { + fn downcast(self) -> ::core::option::Option { + match self { + #(#cases)* + } + } + } + } + + pub(crate) fn impl_fn_wrap(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let client_state_type = &client.inner_ty_path; + quote! 
{
+                #(#attrs)*
+                if let Some(state) = sub_state.downcast_ref::<#client_state_type>() {
+                    return Some(Self::#variant_ident(state.clone()));
+                }
+            }
+        });
+
+        quote! {
+            fn wrap(sub_state: &dyn ::core::any::Any) -> ::core::option::Option<Self> {
+                #(#cases)*
+                None
+            }
+        }
+    }
+
+    pub(crate) fn impl_fn_encode_to_vec(&self) -> proc_macro2::TokenStream {
+        let crate_ = &self.crate_ident;
+        quote! {
+            fn encode_to_vec(&self) -> ::alloc::vec::Vec<u8> {
+                #crate_::protobuf::Protobuf::encode_vec(self)
+            }
+        }
+    }
+}
diff --git a/derive/src/consensus_state.rs b/derive/src/consensus_state.rs
new file mode 100644
index 0000000000..5aa6ef0123
--- /dev/null
+++ b/derive/src/consensus_state.rs
@@ -0,0 +1,78 @@
+use crate::State;
+
+use quote::quote;
+
+impl State {
+    fn impl_fn_root(&self) -> proc_macro2::TokenStream {
+        let cases = self.clients.iter().map(|client| {
+            let variant_ident = &client.variant_ident;
+            let attrs = &client.attrs;
+            let trait_ = &self.current_impl_trait;
+            quote! {
+                #(#attrs)*
+                Self::#variant_ident(state) => #trait_::root(state),
+            }
+        });
+
+        let crate_ = &self.crate_ident;
+        quote! {
+            fn root(&self) -> &#crate_::core::ics23_commitment::commitment::CommitmentRoot {
+                match self {
+                    #(#cases)*
+                }
+            }
+        }
+    }
+
+    fn impl_fn_timestamp(&self) -> proc_macro2::TokenStream {
+        let cases = self.clients.iter().map(|client| {
+            let variant_ident = &client.variant_ident;
+            let attrs = &client.attrs;
+            let trait_ = &self.current_impl_trait;
+            quote! {
+                #(#attrs)*
+                Self::#variant_ident(state) => #trait_::timestamp(state),
+            }
+        });
+
+        let crate_ = &self.crate_ident;
+        quote! {
+            fn timestamp(&self) -> #crate_::timestamp::Timestamp {
+                match self {
+                    #(#cases)*
+                }
+            }
+        }
+    }
+
+    pub fn impl_consensus_state(&mut self) -> proc_macro2::TokenStream {
+        let crate_ = &self.crate_ident;
+        self.current_impl_trait =
+            syn::parse2(quote! { #crate_::core::ics02_client::client_consensus::ConsensusState })
+                .unwrap();
+        self.current_impl_error =
+            syn::parse2(quote!
{ #crate_::core::ics02_client::error::Error }).unwrap();
+
+        let this = &self.self_ident;
+        let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl();
+        let trait_ = &self.current_impl_trait;
+
+        let fn_root = self.impl_fn_root();
+        let fn_timestamp = self.impl_fn_timestamp();
+        let fn_downcast = self.impl_fn_downcast();
+        let fn_wrap = self.impl_fn_wrap();
+        let fn_encode_to_vec = self.impl_fn_encode_to_vec();
+
+        quote! {
+            impl #impl_generics #trait_ for #this #ty_generics #where_clause {
+                type Error = ::core::convert::Infallible;
+
+                #fn_root
+                #fn_timestamp
+                #fn_downcast
+                #fn_wrap
+                #fn_encode_to_vec
+            }
+        }
+    }
+}
diff --git a/derive/src/lib.rs b/derive/src/lib.rs
new file mode 100644
index 0000000000..3414f91d6c
--- /dev/null
+++ b/derive/src/lib.rs
@@ -0,0 +1,178 @@
+mod client_def;
+mod client_state;
+mod coercion;
+mod consensus_state;
+mod client_messaage;
+mod misbehaviour;
+mod protobuf;
+mod utils;
+
+use proc_macro::{Span, TokenStream};
+use proc_macro2::Ident;
+use quote::quote;
+
+use crate::utils::{generate_crate_access_2018, ident_path};
+use syn::{parse_macro_input, Data, DeriveInput, Generics, Path, Type, TypePath};
+
+struct AnyData {
+    pub client_message_ident: Ident,
+    pub client_state_ident: Ident,
+    pub consensus_state_ident: Ident,
+}
+
+struct ClientData {
+    pub variant_ident: Ident,
+    pub inner_ty_path: TypePath,
+    pub client_state_path: TypePath,
+    pub attrs: Vec<syn::Attribute>,
+    pub proto_ty_url: Option<Ident>,
+    pub proto_decode_error: Option<Ident>,
+}
+
+impl ClientData {
+    pub fn new(
+        variant_ident: Ident,
+        inner_ty_path: TypePath,
+        attrs: Vec<syn::Attribute>,
+        proto_ty_url: Option<Ident>,
+        proto_decode_error: Option<Ident>,
+    ) -> Self {
+        let client_state_path =
+            ident_path(Ident::new(&format!("{}ClientState", variant_ident), variant_ident.span()));
+        Self {
+            variant_ident,
+            inner_ty_path,
+            client_state_path,
+            attrs,
+            proto_ty_url,
+            proto_decode_error,
+        }
+    }
+}
+
+struct State {
+    pub any_data: AnyData,
+    pub clients: Vec<ClientData>,
+    pub self_ident:
Ident, + pub generics: Generics, + pub crate_ident: Path, + pub current_impl_trait: TypePath, + pub current_impl_error: TypePath, + pub client_state_trait: TypePath, +} + +#[proc_macro_derive(ClientDef, attributes(ibc))] +pub fn derive_client_def(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let mut state = State::from_input(input, client_data_with_proto_attrs); + state.impl_client_def().into() +} + +#[proc_macro_derive(ClientState, attributes(ibc))] +pub fn derive_client_state(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let mut state = State::from_input(input, client_data_with_proto_attrs); + state.impl_client_state().into() +} + +#[proc_macro_derive(ConsensusState, attributes(ibc))] +pub fn derive_consensus_state(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let mut state = State::from_input(input, client_data_with_proto_attrs); + state.impl_consensus_state().into() +} + +#[proc_macro_derive(ClientMessage, attributes(ibc))] +pub fn derive_client_message(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let mut state = State::from_input(input, client_data_with_proto_attrs); + state.impl_client_message().into() +} + +#[proc_macro_derive(Misbehaviour, attributes(ibc))] +pub fn derive_misbehaviour(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let mut state = State::from_input(input, client_data_with_proto_attrs); + state.impl_misbehaviour().into() +} + +#[proc_macro_derive(Protobuf, attributes(ibc))] +pub fn derive_protobuf(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + let state = State::from_input(input, client_data_with_proto_attrs); + state.impl_protobuf().into() +} + +fn client_data_with_proto_attrs(variant: &syn::Variant) -> ClientData { + assert_eq!(variant.fields.len(), 1, 
"Only single field variants are supported"); + let field = variant.fields.iter().next().unwrap(); + let client_def_path = match &field.ty { + Type::Path(p) => p.clone(), + _ => panic!("Only path types are supported"), + }; + let mut proto_url = None; + let mut proto_decode_error = None; + let attrs = variant + .attrs + .iter() + .filter(|attr| { + let string = format!("{}", attr.path.segments.first().unwrap().ident); + if string == "ibc" { + let meta = attr.parse_meta().unwrap(); + if let syn::Meta::List(list) = meta { + for nested in list.nested { + if let syn::NestedMeta::Meta(syn::Meta::NameValue(nv)) = nested { + let ident = &nv.path.segments.first().unwrap().ident; + if let syn::Lit::Str(lit) = nv.lit { + if ident == "proto_url" { + assert!( + proto_url.is_none(), + "Only one proto type url is allowed" + ); + proto_url = Some(Ident::new(&lit.value(), lit.span())); + } else if ident == "proto_decode_err" { + assert!( + proto_decode_error.is_none(), + "Only one proto decode error is allowed" + ); + proto_decode_error = Some(Ident::new(&lit.value(), lit.span())); + } + } + } + } + } + } + string == "cfg" + }) + .cloned() + .collect(); + + ClientData::new(variant.ident.clone(), client_def_path, attrs, proto_url, proto_decode_error) +} + +impl State { + fn from_input(input: DeriveInput, client_fn: impl Fn(&syn::Variant) -> ClientData) -> Self { + let data = match &input.data { + Data::Enum(data) => data, + _ => panic!("Only enums are supported"), + }; + let span = input.ident.span(); + let crate_ = generate_crate_access_2018("ibc").expect("ibc-rs crate not found"); + let client_state_trait = + syn::parse2(quote! 
{ #crate_::core::ics02_client::client_state::ClientState }).unwrap(); + State { + self_ident: input.ident, + any_data: AnyData { + client_message_ident: Ident::new("AnyClientMessage", span), + client_state_ident: Ident::new("AnyClientState", span), + consensus_state_ident: Ident::new("AnyConsensusState", span), + }, + clients: data.variants.iter().map(client_fn).collect(), + generics: input.generics.clone(), + crate_ident: crate_, + current_impl_trait: ident_path(Ident::new("UNKNOWN_TRAIT", Span::mixed_site().into())), + current_impl_error: ident_path(Ident::new("UNKNOWN_ERROR", Span::mixed_site().into())), + client_state_trait, + } + } +} diff --git a/derive/src/misbehaviour.rs b/derive/src/misbehaviour.rs new file mode 100644 index 0000000000..5ea86aaa74 --- /dev/null +++ b/derive/src/misbehaviour.rs @@ -0,0 +1,54 @@ +use crate::State; + +use quote::quote; + +impl State { + pub fn impl_fn_client_id(&self) -> proc_macro2::TokenStream { + let cases = self.clients.iter().map(|client| { + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let trait_ = &self.current_impl_trait; + quote! { + #(#attrs)* + Self::#variant_ident(misbehaviour) => #trait_::client_id(misbehaviour), + } + }); + + let crate_ = &self.crate_ident; + quote! { + fn client_id(&self) -> &#crate_::core::ics24_host::identifier::ClientId { + match self { + #(#cases)* + } + } + } + } + + pub fn impl_misbehaviour(&mut self) -> proc_macro2::TokenStream { + let crate_ = &self.crate_ident; + self.current_impl_trait = + syn::parse2(quote! { #crate_::core::ics02_client::misbehaviour::Misbehaviour }) + .unwrap(); + self.current_impl_error = + syn::parse2(quote! 
{ #crate_::core::ics02_client::error::Error }).unwrap(); + let this = &self.self_ident; + let trait_ = &self.current_impl_trait; + let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl(); + + let fn_client_id = self.impl_fn_client_id(); + let fn_height = self.impl_fn_height(); + let fn_downcast = self.impl_fn_downcast(); + let fn_wrap = self.impl_fn_wrap(); + let fn_encode_to_vec = self.impl_fn_encode_to_vec(); + + quote! { + impl #impl_generics #trait_ for #this #ty_generics #where_clause { + #fn_client_id + #fn_height + #fn_downcast + #fn_wrap + #fn_encode_to_vec + } + } + } +} diff --git a/derive/src/protobuf.rs b/derive/src/protobuf.rs new file mode 100644 index 0000000000..376cbb5beb --- /dev/null +++ b/derive/src/protobuf.rs @@ -0,0 +1,106 @@ +use crate::{generate_crate_access_2018, State}; +use convert_case::{Case, Casing}; + +use quote::quote; + +impl State { + pub fn impl_try_from_any(&self) -> proc_macro2::TokenStream { + let this = &self.self_ident; + let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl(); + let crate_ = &self.crate_ident; + let error = quote!(#crate_::core::ics02_client::error::Error); + + let cases = self.clients.iter().filter_map(|client| { + let type_url = client.proto_ty_url.as_ref()?; + let decode_err = client.proto_decode_error.clone().unwrap_or_else(|| { + let string_without_any = &this.to_string()[3..]; + syn::parse_str(&format!("decode_raw_{}", string_without_any.to_case(Case::Snake))) + .unwrap() + }); + let variant_ident = &client.variant_ident; + let attrs = &client.attrs; + let inner_ty = &client.inner_ty_path; + Some(quote! { + #(#attrs)* + #type_url => Ok(Self::#variant_ident( + <#inner_ty>::decode_vec(&value.value) + .map_err(#error::#decode_err)?, + )), + }) + }); + let ibc_proto = + generate_crate_access_2018("ibc-proto").expect("'ibc-proto' crate not found"); + let proto_any = quote! 
{ #ibc_proto::google::protobuf::Any };
+
+        // TODO: fix up error variants used in decoding
+        quote! {
+            impl #impl_generics ::core::convert::TryFrom<#proto_any> for #this #ty_generics #where_clause {
+                type Error = #error;
+
+                fn try_from(value: #proto_any) -> ::core::result::Result<Self, Self::Error> {
+                    match value.type_url.as_str() {
+                        "" => Err(#error::empty_consensus_state_response()),
+                        #(#cases)*
+                        _ => Err(#error::unknown_consensus_state_type(value.type_url)),
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn impl_from_self_for_any(&self) -> proc_macro2::TokenStream {
+        let this = &self.self_ident;
+        let gens = &self.generics;
+        let gens_where = &self.generics.where_clause;
+        let gen_params = &self.generics.params;
+        let ibc_proto =
+            generate_crate_access_2018("ibc-proto").expect("'ibc-proto' crate not found");
+        let proto_any = quote! { #ibc_proto::google::protobuf::Any };
+
+        let cases = self.clients.iter().filter_map(|client| {
+            let variant_ident = &client.variant_ident;
+            let attrs = &client.attrs;
+            let type_url = client.proto_ty_url.as_ref()?;
+            Some(quote! {
+                #(#attrs)*
+                #this ::<#gen_params> ::#variant_ident(value) => #proto_any {
+                    type_url: ::alloc::string::ToString::to_string(&#type_url),
+                    value: value.encode_to_vec(),
+                },
+            })
+        });
+
+        quote! {
+            impl #gens ::core::convert::From<#this #gens> for #proto_any #gens_where {
+                fn from(value: #this #gens) -> Self {
+                    match value {
+                        #(#cases)*
+                    }
+                }
+            }
+        }
+    }
+
+    pub fn impl_protobuf(&self) -> proc_macro2::TokenStream {
+        let crate_ = &self.crate_ident;
+        let this = &self.self_ident;
+        let (impl_generics, ty_generics, where_clause) = self.generics.split_for_impl();
+
+        let impl_try_from_any = self.impl_try_from_any();
+        let impl_from_self_for_any = self.impl_from_self_for_any();
+
+        let ibc_proto =
+            generate_crate_access_2018("ibc-proto").expect("'ibc-proto' crate not found");
+
+        let proto_any = quote! { #ibc_proto::google::protobuf::Any };
+        quote!
{
+            impl #impl_generics
+                #crate_::protobuf::Protobuf<#proto_any>
+                for #this #ty_generics #where_clause {}
+
+            #impl_try_from_any
+
+            #impl_from_self_for_any
+        }
+    }
+}
diff --git a/derive/src/utils.rs b/derive/src/utils.rs
new file mode 100644
index 0000000000..1115e6f83f
--- /dev/null
+++ b/derive/src/utils.rs
@@ -0,0 +1,47 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use proc_macro2::Span;
+use proc_macro_crate::{crate_name, FoundCrate};
+use quote::quote;
+use syn::parse::Error;
+use syn::{Ident, Path, TypePath};
+
+/// Generate the crate access for the crate using 2018 syntax.
+///
+/// For `ibc`, the output will for example be `ibc_rs`.
+pub fn generate_crate_access_2018(def_crate: &str) -> Result<Path, Error> {
+    if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate {
+        return Ok(Ident::new(&"crate", Span::call_site()).into());
+    }
+    match crate_name(def_crate) {
+        Ok(FoundCrate::Itself) => {
+            let path = Ident::new(&def_crate.to_string().replace("-", "_"), Span::call_site());
+            Ok(syn::parse2(quote! { ::#path })?)
+        },
+        Ok(FoundCrate::Name(name)) => {
+            let ident = Ident::new(&name, Span::call_site());
+            Ok(syn::parse2(quote! { ::#ident })?)
+ }, + Err(e) => Err(Error::new(Span::call_site(), e)), + } +} + +pub fn ident_path(ident: Ident) -> TypePath { + let client_def_path = TypePath { qself: None, path: syn::Path::from(ident) }; + client_def_path +} diff --git a/docs/architecture/README.md b/docs/architecture/README.md deleted file mode 100644 index b50a2fd3d0..0000000000 --- a/docs/architecture/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Architecture Decision Records (ADR) - -This is a location to record all high-level architecture decisions in the IBC-RS project. - -You can read more about the ADR concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t). - -An ADR should provide: - -- Context on the relevant goals and the current state -- Proposed changes to achieve the goals -- Summary of pros and cons -- References -- Changelog - -Note the distinction between an ADR and a spec. The ADR provides the context, intuition, reasoning, and -justification for a change in architecture, or for the architecture of something -new. The spec is much more compressed and streamlined summary of everything as -it is or should be. - -If recorded decisions turned out to be lacking, convene a discussion, record the new decisions here, and then modify the code to match. - -Note the context/background should be written in the present tense. - -To suggest an ADR, please make use of the [ADR template](./adr-template.md) provided. 
- -## Table of Contents - -| ADR \# | Description | Status | -| ------ | ----------- | ------ | -| [001](./adr-001-repo.md) | Repository structure for `ibc-rs` | Accepted | -| [002](./adr-002-ibc-relayer.md) | IBC Relayer in Rust | Accepted | -| [003](./adr-003-handler-implementation.md) | IBC handlers implementation | Accepted | -| [004](./adr-004-relayer-domain-decomposition.md) | Relayer domain decomposition | Accepted | -| [005](./adr-005-relayer-v0-implementation.md) | Relayer v0 implementation | Accepted | -| [006](./adr-006-hermes-v0.2-usecases.md) | Hermes v0.2.0 Use-Cases | Proposed | -| [007](./adr-007-error.md) | Error Management | Accepted | -| [008](./adr-008-ics20-implementation.md) | ICS20 implementation | Accepted | -| [009](./adr-009-chain-endpoint-handle-standardization.md) | ChainEndpoint and ChainHandle methods standardization | Accepted | diff --git a/docs/architecture/adr-001-repo.md b/docs/architecture/adr-001-repo.md deleted file mode 100644 index a160313c0f..0000000000 --- a/docs/architecture/adr-001-repo.md +++ /dev/null @@ -1,175 +0,0 @@ -# ADR 001: Repository Structure - -## Changelog - -* 2020-07-22: First draft. - -## Context - -This document provides a basic rundown of the structure of this repository, plus some plans for its evolution. - -This repository comprises a Rust implementation of the [IBC](https://github.com/cosmos/ibc) suite of protocols. -To complement this implementation, this repository also comprises specifications, primarily written in TLA+, and -sometimes in English. - -At the moment we are invested mostly in the development of a relayer and several important modules (client, connection, -channel, and packets). -Eventually, we hope to cover the full IBC suite. - -## Decision - -The `ibc-rs` repository comprises three broad parts: - -1. 
The codebase for the IBC relayer implementation in Rust is in `relayer/`, which consists of crate **`relayer-cli`** (the -frontend application of the relayer) as well as crate **`relayer`** (the core relayer functionality). -2. The codebase for IBC modules is in `modules/`, making up the crate called **`relayer-modules`**. -3. English and TLA+ specs reside under `docs/spec`, classified by the component they target, e.g., relayer or connection -handshake. - -Following the work in [#142](https://github.com/informalsystems/ibc-rs/issues/142), the crate -**`ibc-proto`**(originally in a [separate repo](https://github.com/informalsystems/ibc-proto) and [documented here](https://docs.rs/ibc-proto/)) -shall also become absorbed into the present repo. - -In the following, we discuss the current state and proposed evolution of each of the Rust crates. - -#### Crate `relayer-cli` - -The basic concern of this crate is to provide user-facing functionality for the IBC relayer. This means -implementing a CLI application that dispatches a _command_ to a specific part of the relayer, and then outputs the -result of executing that command. This crate builds on -[Abscissa](https://docs.rs/abscissa_core/0.5.2/abscissa_core/) to simplify command line parsing, application process -lifecycle, and error handling. - -This crate can accept various sub-commands, e.g. `query` a chain for some specific part of their store, `start` the -relayer, or start the `light` client for a given chain. Note that most commands can be further refined with parameters -(for instance, the `query` command can be issued for a `connection` or `channel` or `client`). The bulk of data types -and logic resides in `relayer/cli/commands`, grouped by each specific command. - -#### Crate `relayer` - -This crate implements the core responsibilities of an IBC relayer. 
Briefly speaking, there are 3 high-level
-requirements on an IBC relayer, in no particular order:
-
-- __R1.__ ability to interface with IBC-enabled chains, with the purpose of reading their state and submitting transactions to
-these chains;
-- __R2.__ ability to run a light client for IBC-enabled chains, with the purpose of verifying headers and state of these chains;
-- __R3.__ implement the IBC relayer algorithms (ICS 018).
-
-Some functionality described above overlaps with functionality of IBC Modules. For instance, some logic
-that the relayer implements for handling connection handshakes (in ICS18) overlaps with logic in the IBC module specific
-for connections (ICS3). Given this overlap, the `relayer-modules` crate serves as the "ground truth" implementing the
-said logic, while the `relayer` crate has a natural dependency on `relayer-modules`.
-
-In addition to the dependency on the IBC Modules, the relayer also depends on the `tendermint-rs` crate. This is
-useful in particular for interfacing with the light client implementation from this crate, as well as core data types
-such as `SignedHeader`, `Validator`, or `ValidatorSet`.
-
-[ADR 002](./adr-002-ibc-relayer.md) captures more specific details on the relayer architecture.
-
-#### Crate `relayer-modules`
-
-The [canonical IBC specification](https://github.com/cosmos/ibc/tree/master/spec/) is modular in the sense of grouping
-different components of the specification in modules; for instance, specification _ICS03_ pertains to the abstraction of
-IBC connections and the IBC connection handshake protocol, while _ICS04_ pertains to IBC channels and packets.
-We group the code in this crate to reflect the modular separation in the canonical IBC specification.
-
-A few common patterns we employ in this crate are as follows.
-
-###### `msgs.rs`
-
-Many IBC protocols involve the receiving and processing of messages. 
-The protocols for establishing a connection (ICS03) or a channel (ICS04), for example, comprise -the processing of four different types of messages each. -In particular, the data structures representing these messages for connection handshake are `MsgConnectionOpenInit`, -`MsgConnectionOpenTry`, `MsgConnectionOpenAck`, and `MsgConnectionOpenConfirm`. - -The creation and validation of protocol messages for each protocol resides in `msgs.rs` within the respective ICS. -Each of these messages should implement the trait `pub trait Msg`, ensuring that all messages implement a basic -interface allowing them to be routed correctly (via the IBC routing module and with the help of the `route()` method) -or support basic validation. - -###### Error handling - -Each ICS enumerates specific errors that may occur within `icsX_NAME/error.rs`. -The error-handling pattern here build on [thiserror](https://lib.rs/crates/thiserror) and -[anomaly](https://lib.rs/crates/anomaly) for capturing the context of errors plus backtraces (optional). -Generally speaking, an IBC module constructs and propagates errors to the caller by two patterns: - -```Rust -return Err(Kind::MissingCounterparty.into()) -``` - -or if a context can be supplied this is preferable: - -```rust -return Err(Kind::InvalidConnectionHopsLength - .context("validate channel") - .into()); -``` -where the ICS itself defines `Kind::InvalidConnectionHopsLength` and `Kind::MissingCounterparty`. - -###### Deserialization - -See the details for the crate `ibc-proto` [below](#crate-ibc-proto). - -#### Crate `ibc_proto` - -The `ibc-proto` library gives a developer access to the Cosmos SDK IBC proto-defined structs directly in Rust. -The canonical IBC structs reside presently in [cosmos-sdk](https://github.com/cosmos/ibc-go/tree/main/proto/ibc), -defined in a proto3 syntax. 
-We compile these structs via prost directly to .rs files and import them into the other crates typically under the same -name prefixed with "Raw", for example: - -```Rust -use ibc_proto::channel::Channel as RawChannel; -``` - -For any Raw data type that is defined in `ibc-proto` we implement the `DomainType` trait, which serves as a translation -& validation layer between the proto ("Raw") types and the domain types. For example, for a `Channel` we do as follows: - -```Rust -impl DomainType for ChannelEnd {} - -impl TryFrom for ChannelEnd { - type Error = anomaly::Error; - - fn try_from(value: RawChannel) -> Result { - // Translate, validate each field from RawChannel into a Channel. - } -} - -impl From for RawChannel { - fn from(value: ChannelEnd) -> Self { - // Translate Channel into a RawChannel - } -} -``` - -This issue [#130](https://github.com/informalsystems/ibc-rs/issues/130) is a good starting place for more context -on `ibc-proto`. - -### References - -The following resources serve as reference implementations or specifications that we use to guide the development of -the present crates: - -For the IBC relayer: - -- A first implementation of the IBC relayer in Golang is under active development at -[iqlusioninc/relayer](https://github.com/iqlusioninc/relayer). -- The English specification of the relayer algorithm is captured in the -[ICS018](https://github.com/cosmos/ibc/tree/master/spec/relayer/ics-018-relayer-algorithms) spec. - -For IBC modules: - -- A Golang implementation of IBC modules is under active development -at [cosmos/ibc-go](https://github.com/cosmos/ibc-go/tree/main/modules). -- The English specifications for IBC modules reside in [cosmos/ibc](https://github.com/cosmos/ibc/tree/master/spec). - -## Status - -Proposed - -## Consequences - -Not applicable. 
diff --git a/docs/architecture/adr-002-ibc-relayer.md b/docs/architecture/adr-002-ibc-relayer.md deleted file mode 100644 index 3947251401..0000000000 --- a/docs/architecture/adr-002-ibc-relayer.md +++ /dev/null @@ -1,798 +0,0 @@ -# ADR 002: IBC Relayer in Rust - -## Changelog -* 2020-05-19: First draft. Accepted -* 2020-04-06: Configuration updates - -## Definitions -These definitions are specific for this document and they may not be consistent with the IBC Specification. - -IBC transaction - a transaction that includes IBC datagrams (including packets). This is constructed by the relayer and sent over the physical network to a chain according to the chain rules. For example, for tendermint chains a `broadcast_tx_commit` request is sent to a tendermint RPC server. - -IBC datagram - is an element of the transaction payload sent by the relayer; it includes client, connection, channel and IBC packet data. Multiple IBC datagrams may be included in an IBC transaction. - -IBC packet - a particular type of IBC datagram that includes the application packet and its commitment proof. - -On-chain IBC Client (or IBC Client) - client code running on chain, typically only the light client verification related functionality. - -Relayer Light Client - full light client functionality, including connecting to at least one provider (full node), storing and verifying headers, etc. - -Source chain - the chain from which the relayer reads data to fill an IBC datagram. - -Destination chain - the chain where the relayer submits transactions that include the IBC datagram. - -A and B chains - for connection protocol, A is the "initiating" chain where `MsgConnectionOpenInit` is initially processed and eventually `MsgConnectionOpenAck`. B is the chain where `MsgConnectionOpenTry` and `MsgConnectionOpenConfirm` are processed. -Similar for channel handshake protocol. 
- -## Context -A relayer is an off-chain process responsible for relaying IBC datagrams between two or more chains by scanning their states and submitting transactions. This is because in the IBC architecture, modules are not directly sending messages to each other over networking infrastructure, but instead they create and store the data to be retrieved and used by a relayer to build the IBC datagrams. - -This document provides an initial Rust implementation specification of a relayer that interconnects Cosmos-SDK/ Tendermint chains. - -The diagram below shows a high level view of the relayer and its interactions with the source and destination chains. The next sections go in more details of the different interactions. - -![IBC Relayer Architecture Diagram](assets/IBC_relayer.jpeg). - -## Assumptions and Dependencies -This section covers assumptions and dependencies about the chains and their IBC implementation. The first implementation focuses on and will only be tested with Cosmos-SDK/ Tendermint chains. In addition, functionality required by the relayer that is outside the scope of this document, and the availability of their implementations is considered. - -#### Data Availability -The relayer monitors the chain state to determine when packet forwarding is required. The relayer must be able to retrieve the data within some time bound. This is referred to as **data availability**. - -#### Data Legibility -IBC protocol defines the minimal data set that must be made available to relayers for correct operation of the protocol. The relayer expects the data to be legible, i.e. **data should be serialized** according to the IBC specification format; this includes consensus state, client, connection, channel, and packet information, and any auxiliary state structure necessary to construct proofs of inclusion or exclusion of particular key/value pairs in state. - -#### Query Functionality -IBC host state machines MUST expose an interface for inspecting their state. 
For Cosmos/Tendermint chains this means: -- the IBC modules on chain correctly implement and respond to queries - - [IBC-Modules-Rust] an implementation for some queries currently exist in Cosmos-SDK and same and more need to be implemented in Rust. The full requirements are detailed in section Relayer Queries. -- the relayer needs the ability to send rpc/http ABCI queries to and receive replies from Tendermint/Cosmos-SDK - - [[ABCI Rust](https://github.com/tendermint/rust-abci)] - ABCI Rust implementation - - [IBC-Modules-Rust] identifier validation is required (ICS-024) - - [IBC-Modules-Rust] requires Rust types for all query responses - - [[Merkle-Proofs-Rust](https://github.com/confio/ics23/tree/master/rust)] (candidate implementation) - some query responses include proofs and included in IBC transactions by the relayer (some may be validated, TBD) - -#### IBC Messages -The relayer creates transactions that include IBC messages to manage clients, connections and channels, and send application packets to destination chains. These messages must be defined in the IBC Rust implementation [IBC-Modules-Rust]. - -#### IBC Logging System -IBC packet data & timeouts are not stored directly in the chain state (as this storage is presumed to be expensive) but are instead committed to with a succinct cryptographic commitment (only the commitment is stored). -As a consequence, IBC requires that a **host state machine MUST provide an event logging system** that logs data in the course of transaction execution. **Logs must be queryable** by relayers to read IBC packet data & timeouts. 
- -The logging system must provide the following functions: - - [IBC-Modules-Go] emitLogEntry for emitting log entries called by the state machine during transaction execution: - - type emitLogEntry = (topic: string , data: []byte) => void - - example: emitLogEntry("sendPacket", {sequence: packet.sequence , data: packet.data, timeout: packet.timeout}) - - [IBC-Modules-Go] queryByTopic for querying past logs matching a given topic: - - type queryByTopic = (height: uint64 , topic: string) => Array < []byte > - -#### Keyring -The relay process must have access to its accounts with tokens on all destination chains, with sufficient balance to pay for transaction fees. Account key information must be stored and managed securely. A keyring implementation is required for CRUD key operations. -[Keyring-Rust] Investigation in existing Rust implementations is needed. (ex: [hwchen-keyring](https://github.com/hwchen/keyring-rs)) - -### Chain Transactions and Signing -The relayer must create chain specific signed transactions. -[Cosmos-Tx-Rust] For the first release Cosmos-SDK transaction signing is required. One possible implementation is [iqlusion's sdtx crate](https://github.com/iqlusioninc/crates/tree/main/stdtx) - -#### Implementation of IBC "routing module" -The default IBC handler uses a receiver call pattern, where modules must individually call the IBC handler in order to bind to -ports, start handshakes, accept handshakes, send and receive packets, etc. While this provides flexibility for modules, it imposes extra work on the part of the relayer processes that now needs to track the state of multiple modules. The IBC specification describes an IBC “routing module” to route packets, and simplify the task of relayers. This routing module accepts external datagrams and calls into the IBC handler to deal with handshakes and packet relay. 
The routing module keeps a lookup table of modules, which it can use to look up and call a module when a packet is received, so that external relayers need only ever relay packets to the routing module.
-[IBC-Routing-Module-Go] Initial version of the relayer assumes that chains implement the "routing module"
-
-#### Batching
-The relayer may batch IBC datagrams in a single transaction if supported by destination chain and allowed by configuration. In this case the relayer can amortise any overhead costs (e.g. signature checks for fee payment).
-Initial version of the relayer assumes batching is supported by all chains. An option may be later included in the configuration file.
-
-## Relayer Requirements
-
-A correct relayer MUST:
-
-- **[R-config-start]** Read, parse, validate a configuration file upon start and configure itself for the specified chains and paths
-- **[R-transport]** Have access to the networking protocols (e.g. TCP/IP, UDP/IP, or QUIC/IP) and physical transport, required to read the state of one blockchain/ machine and submit data to another
-- **[R-provider]** Maintain transport connections to at least one full node per chain
-- **[R-query]** Query IBC data on source and destination chains
-- **[R-light-client]** Run light clients for source chains and
-- **[R-IBC-client]** create and update IBC clients on destination chains
-- **[R-accounts]** Own accounts on destination chains with sufficient balance to pay for transaction fees
-- **[R-transact]** Create, sign and forward IBC datagram transactions
-- **[R-relay]** Perform correct relaying of all required messages, according to the IBC sub-protocol constraints
-- **[R-restart]** Resume correct functionality after restarts
-- **[R-upgrade]** Resume correct functionality after upgrades
-- **[R-proofs]** Perform proof verification (as it will be done on the destination chain) and not forward messages where proof verification fails
-
-The relayer MAY:
-- **[R-config-cli]** Provide ways to change 
configuration at runtime -- **[R-bisection]** Perform bisection to optimize transaction costs and computation on destination chains -- **[R-relay-prio]** Filter or order transactions based on some criteria (e.g. in accordance with the fee payment model) - -## Implementation -The initial implementation will heavily borrow from the Go relayer implementation that uses a "naive" algorithm for relaying messages. The structure of the configuration file is similar with the one in Go (see [Go-Relayer](https://github.com/cosmos/relayer)) - -### Configuration - -> WIP - -Upon start the relayer reads a configuration file that includes global and per chain parameters. The file format is .toml -Below is an example of a configuration file. - -```toml -[global] -log_level = "error" - -[mode] - -[mode.clients] -enabled = true -refresh = true -misbehaviour = true - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -clear_interval = 100 -clear_on_start = true -tx_confirmation = true - -[[chains]] - id = "chain_A" - rpc_addr = "http://localhost:26657" - grpc_addr = "http://localhost:9090" - websocket_addr = "ws://localhost:26657/websocket" - rpc_timeout = "10s" - account_prefix = "cosmos" - key_name = "testkey" - store_prefix = "ibc" - client_ids = ["clA1", "clA2"] - gas = 200000 - gas_adjustement = 1.3 - gas_price = "0.025stake" - trusting_period = "336h" - -[[chains]] - id = "chain_B" - rpc_addr = "http://localhost:26557" - grpc_addr = "http://localhost:9091" - websocket_addr = "ws://localhost:26557/websocket" - rpc_timeout = "10s" - account_prefix = "cosmos" - key_name = "testkey" - store_prefix = "ibc" - client_ids = ["clB1"] - gas = 200000 - gas_adjustement = 1.3 - gas_price = "0.025stake" - trusting_period = "336h" - -``` -The main sections of the configuration file are: -- `global`: -Relaying is done periodically and the frequency is dictated by the `timeout` parameter. 
The `strategy` parameter configures the relayer to run a particular relaying algorithm. -- `chains`: -Chain level information including account and key name, gas information, trusting period, etc. All source and destination chains must be listed here. -- paths (`connections`, `connections.paths`): -The relayer may be configured to relay between some application ports, over a number of connections and channels, in unidirectional or bidirectional mode. - -### Initialization - -The relayer performs initialization based on the content of the configuration file: -- the file is parsed and semantically validated -- the chains, connections, ports, channels for which relaying is enabled are stored in the Config structure - -```rust -pub struct Config { - pub global: GlobalConfig, - pub chains: Vec, - pub connections: Option>, -} - -pub struct GlobalConfig { - /// All valid log levels, as defined in tracing: - /// https://docs.rs/tracing-core/0.1.17/tracing_core/struct.Level.html - pub log_level: String, -} - -pub struct ChainConfig { - pub id: ChainId, - pub rpc_addr: tendermint_rpc::Url, - pub websocket_addr: tendermint_rpc::Url, - pub grpc_addr: tendermint_rpc::Url, - pub rpc_timeout: Duration, - pub account_prefix: String, - pub key_name: String, - pub client_ids: Vec, - pub gas: u64, - pub trusting_period: Duration, -} - -pub struct Connection { - pub src: Option, // use any source - pub dest: Option, // use any destination - pub paths: Option>, // use any port, direction bidirectional -} - -pub struct ConnectionEnd { - pub client_id: String, - pub connection_id: Option, // use all connections to this client -} - -pub enum Direction { - Unidirectional, - Bidirectional, -} - -pub struct RelayPath { - pub src_port: Option, // default from any source port - pub dest_port: Option, // default from any dest port - pub src_channel: Option, // default from any source port - pub dest_channel: Option, // default from any dest port - pub direction: Direction, // default 
bidirectional
-}
-```
-All `Option` fields with `None` values mean "any" values. For `direction`, default is bidirectional.
-All non-`Option` fields are mandatory and must appear in the configuration file.
-If the relayer is started with an invalid configuration file, an error is displayed and the relayer process exits.
-
-### Relayer Commands
-
-#### Validate
-To validate a configuration file:
-
-`relayer -c config validate `
-
-The command verifies that the specified configuration file parses and it is semantically correct.
-
-#### Start
-To start the relayer:
-
-`relayer -c start`
-
-The command performs the validation as described above and then starts the relayer.
-
-#### Query
-Most of the queries performed while relaying are also available from the CLI.
-
-`relayer -c query client state [-h ] [-p ]`
-
-The command queries the full client state of `` on `` at ``, with or without proof depending on the `` flag. Default `` is latest state and `` is `true`.
-
-`relayer -c query client consensus [-h ] [-p ]`
-
-The command queries the consensus state of `` at height `` on `` at ``, with or without proof depending on the `` flag. Default `` is latest state and `` is `true`.
-
-### Relayer Queries
-The relayer queries chain state in order to build the IBC messages. It is expected that each chain type provides implementations of these queries. Initial Rust relayer implementation will be tested with Cosmos-SDK/Tendermint chains, and while some of the IBC-Modules functionality in Rust is not required (e.g. handler functions), a "query" crate should be available for the relayer.
-For tendermint, the queries use the `abci.RequestQuery` over rpc/http to retrieve the data.
-
-The format of the public/ provable state query parameters and responses is chain independent and should also be defined in this crate.
-
-The following queries are required:
-
-- `query_store_prefix(chain)` - returns the commitment prefix of the chain (returns chain specific []byte, e.g. 
`ibc` for tendermint)
-- `query_all_client_states(chain)` - returns the IBC light clients instantiated on the chain
-- `query_client_consensus_state(chain, clientID, height)` - returns the consensus state proof for a light client at a given height if height > 0, else it returns the latest height
-- `query_connections(chain)` - returns all connections created on the chain
-- `query_client_connections(chain, clientID)` - returns all connections associated with a light client
-- ...more to be added
-
-### Relayer Concurrency Architecture
-The following threads are spawned and execute within the relayer process:
-- one Tendermint full light client thread, per configured source chain. For example if A->C and B->C paths are enabled then there will be two light client threads, one for A and one for B. These threads download light client headers (block header and commits), verify them and store them as trusted headers in the per chain stores.
-- one thread for the main relaying functionality, aka relay thread.
-- one thread to relay notifications from source chain and to generate IBC events to the relay thread.
-
-The figure below shows the interactions for the last two threads.
-![IBC relayer threads](assets/IBC_relayer_threads.jpeg)
-
-On start:
-1. Communication (channel ?) between the relay and the notification threads is established.
-2. The notification thread registers for IBC events.
-3. The relay thread creates the IBC datagrams for A, for all configuration triggered events, for clients `MsgCreateClient`, `MsgUpdateClient` and
-4. for connections and channels, i.e. `MsgConnOpenInit` and `MsgChannOpenInit` are sent to chains to initiate connection and channel handshake if required. It then waits for events from the notification thread.
-5. The notification thread queries the source chain A at latest height and
-6. sends IBC events to the relay thread. Then it waits for notifications from A.
-7. 
For each event related to X (connection, channel or packets), the relay thread queries the client and X state on destination B, and
-8. the X state on source chain A.
-9. With the information collected in previous steps, the relay thread creates a buffer of messages destined to destination B.
-10. When the notification thread receives an IBC notification for X it sends it to the relay thread.
-11. Steps 11-14 are the same as 6-9 above.
-
-Initial version will have a single relay thread for all configured paths. Temporary threads may be created for the source and destination queries required.
-Future versions may create multiple relay threads. One possibility is to create one for each destination chain Z, responsible for relaying over *->Z paths. Or have a thread pool, selecting an available thread for relaying to a given destination. The notification thread will route the IBC events to the proper thread. Multiple notification threads, e.g. per source, should also be considered.
-
-### Relayer Algorithm
-
-A relayer algorithm is described in [relayer algorithm described in IBC Specification](https://github.com/cosmos/ibc/blame/master/spec/relayer/ics-018-relayer-algorithms/README.md#L47) and [Go relayer implementation ](https://github.com/cosmos/relayer/blob/f3a302df9e6e0c28883f5480199d3190821bcc06/relayer/strategies.go#L49.).
-
-This section describes some of the details of the relay thread algorithm in the Rust implementation. Inputs are the IBC Events and the events of interest are described in Appendix A.
-
-At high level, for each event from a source chain, the relayer:
-- queries client, connection, channels and/or packet related state on source and destination chains,
-- creates new datagrams if needed,
-- batches multiple datagrams in single transaction,
-- signs and submits these transactions to the destination.
-
-#### Proofs
-The relayer must include proofs in some datagrams as required by the IBC handlers. 
There are two types of proofs:
-- proof of some local state on source chain (A). For example, a proof of correct connection state (`ProofInit`, `ProofTry`, `ProofAck`) is included in some of the connection handshake datagrams. The `ConnOpenTry` message includes the `ProofInit` that is obtained from chain A where the connection should be in `INIT` state and have certain local and counterparty identifiers. The message specific sections below go in more details.
-- proof that the chain A's IBC client `clB` is updated with a consensus state and height that have been stored on chain B.
-- these proofs are verified on chain B against the consensus state stored by the A client at `proof_height`.
-
-Notes:
-The proof checks require the handlers on B to recreate the state as expected on chain A and to verify the proof against this. For this to work the store prefix of A needs to be added as prefix to the proof path (standardized in ICS 24). There is currently no query endpoint for this in Cosmos-SDK/Tendermint and initial relayer version includes a per chain store prefix in the configuration.
-The verification on B requires the presence of a consensus state for client A at same height as `proof_height`.
-
-#### Light Client Messages
-After initialization, relayer light clients are created on the destination chains if not already present.
-For a successful A->B relay of IBC packets IBC clients must be instantiated on both source and destination chains, potentially by different relayers. The client creation is permissionless and a relayer may create a client if not already present.
-```rust
-let msg = MsgCreateClient::new(client_id, header, trusting_period, bonding_period, signer);
-```
-
-The relayer runs its own light client thread for A that periodically retrieves and verifies headers. The relay thread uses the stored headers to update the A-client on chain B with new headers as required. 
-```rust -let msg = MsgUpdateClient::new(client_id, header, signer); -``` -It is possible that the relay thread needs a more recent trusted header and in this case it would need a mechanism to signal the client thread to retrieve this header. -Since the relayer must pay for all transactions, including `MsgClientCreate` and `MsgClientUpdate`, there are incentives for optimizations. -For example, light client implementation of Tendermint supports bisection and the relayer may choose to send skipping headers to A-client on B, periodically or when required by new IBC datagrams. - -#### IBC Client Consensus State vs Relayer Light Client States vs Chain states -A number of IBC datagrams contain proofs obtained from chain A at a some height `h`. A proof needs to be verified on B against the commitment root for `h` which, for Tendermint clients, is included in the client consensus state at `h+1`. This is because for Tendermint chains the application Hash after applying all transactions in block `n` is included in block at height `n+1`. - -The relayer therefore needs to ensure that the consensus state at `proof_height+1` exists on chain B. - -One proposal is shown below and described in the rest of this section. -![IBC_client_heights](assets/IBC_client_heights.jpeg) - -The relayer creates a light client on B with `hi` and then updates it as required by processing different IBC events. Let `ha'` be the last consensus state for client on B. -When some IBC event for X (connection, channel or packet) is received, it includes the height, let it be `hx-1` at which the event occured on A. -According to the proposal here, the relayer should: -- get the latest consensus state height of client on B, `ha` -- let `h = max(hx, ha)` -- query for item X at height `h-1` and get a proof `p` at this height -- wait for the block at height `hx` to be received, i.e. 
`Ev{block, hx}` -- get the minimal set of headers from the light client such that `h` verifies against `ha` -- send zero or more `MsgUpdateClient` datagrams and the `MsgX{X, p, h}` in a transaction to B -- if the transaction is successful or `MsgX..` failed, then "consume" the `Ev{X,..}` - - if `MsgX` fails there is nothing that can be done, another relayer must have submitted first -- else raise again the event at `hA-1` if one not already there -- the effect of this is that a new query is made at `hA-1` and since the consensus state at `hA` exists on B, only `MsgX` needs to be sent out - -#### Connection Messages -The relayer queries the source and destination chains of the relaying paths in order to determine if connection handshake datagrams should be sent to destination chains. - -##### Connection Query -The following structures pertain to connection queries and should be detailed in [IBC-Modules-Rust-ADR]. -The structures are shown here for reference. - -```rust -pub struct Counterparty { - pub client_id: ClientId, - pub connection_id: ConnectionId, - pub prefix: CommitmentRoot, -} - -pub struct ConnectionEnd { - pub state: ConnectionState, - pub Id: ConnectionId, - pub client_id: ClientId, - pub counterparty: Counterparty, - pub versions: Vec -} - -pub enum ConnectionState { - "UNINIT", - "INIT", - "TRYOPEN", - "OPEN", -} - -// ConnectionResponse defines the query response for a connection. -// It includes the proof and the height at which the proof was retrieved. -pub struct ConnectionResponse { - pub connection: ConnectionEnd, - pub proof: Option, - pub proof_path: CommitmentPath, - pub proof_height: Height, -} -``` - -#### Connection Relaying - -The figure below shows the four connection handshake message types that can be created during a relay cycle (see the Relayer box and the four actions). For each message the queries (light grey arrows) and expected states on `A` and `B` are shown. 
For example, if the connection on A is in `OPEN` state and on B in `TRYOPEN`, the relayer will send a transaction to B including the `ConnOpenConfirm` datagram. Once processed on B, the state of connection changes from `TRYOPEN` to `OPEN`. - -![IBC connection handshake relay](assets/IBC_conn_handshake_relay.jpeg) - -##### MsgConnectionOpenInit -The `MsgConnectionOpenInit` message is used to initialize a connection. This is done when the relay thread starts, after loading the configuration that includes the connection information and before entering its event loop. In this section it is assumed the message is relayed to A. -```rust -pub struct MsgConnectionOpenInit { - pub connection_id: ConnectionId, // connAtoB - pub client_id: ClientId, // clB - pub counterparty: Counterparty, // {ClientID: clA, ConnectionID: connBtoA, Prefix: "B_store"> - pub signer: AccAddress -} -``` -The comments show the values of the fields for the diagram above. - -The relayer creates and forwards this message only if it has been explicitly configured with the connection information (see `connections.src` and `connections.dest`sections of the configuration file). - -In order to create a `MsgConnectionOpenInit` the relayer recreates the `ConnectionEnd` from the configuration, as it will be stored on A. 
The steps are: -- create the `ConnectionEnd` for the B->A path -```rust - let connection_a = getConfiguredConnection(A, B, ..); -``` -- query connection state on chain A and if it already exist then continue with next event -```rust -let existing_a = ibc_query_connection(chainA, connection_a); -if existing_a.state != "UNINIT" { - continue; -} -``` -- create the message -```rust -let init_msg = MsgConnectionOpenInit { - connection_id: connection_a.connection_id, - client_id: connection_a.client_id, - counterparty: Counterparty { - ClientID: connection_a.counterparty.client_id, - connection_id: connection_a.counterparty.connection_id, - prefix: config(B).store_prefix, - } - Signer: config(A).signer, -} -``` -- send `init_msg` in a transaction to B - -##### MsgConnectionOpenTry -The `MsgConnectionOpenTry` defines the message sent by the relayer to try to open a connection. In this section it is assumed to be relayed to B. - -```rust -pub struct MsgConnectionOpenTry { - pub connection_id: ConnectionId, // connBtoA - pub client_id: ClientId, // clA - pub counterparty: Counterparty, // {ClientID: clB, ConnectionID: connAtoB, Prefix: "A_store"> - pub counterparty_versions: Vec, - pub proof_init: CommitmentProof, // proof that connAtoB connection end is stored on Chain A - pub proof_consensus: CommitmentProof, // proof that on A at proof_height (hA), the B client has - // stored B's consensus state at consensus_height (hB) - pub proof_height: Height, // hA, height of A at which relayer retrieved proof_init - pub consensus_height: Height, // hB - pub signer: AccAddress, -} -``` -The comments show the values of the fields for the diagram above. -Note: -- `proof_height` is the height of chain A when relayer created the `proof_init`, hA in the diagram. 
-
- `consensus_height` is the latest height of chain B that chain A has stored in its client `clB` at the time the relayer queried that client, `hB` in the diagram - -The relayer creates a `MsgConnectionOpenTry` for the A->B relay path when an IBC event notification is received. -The steps are: -- let `connAtoB` be the connection identifier on A,`hx` the height when the event occurred and `clA` the client ID of A on B -- query last client state height on B -```rust -let ha_prime = ibc_query_client_state(chainB, 0).height; -``` -- create `UpdateClientMsg`(s) for `clA` on chain B if required (i.e. if `hx` is higher than latest height of `clA` on B) -```rust - let h = max(hx, ha_prime); - let headers = get_minimal_set(h, ha_prime); - let client_msgs = updateClientMsgs(clA, headers, signer); -``` -- send `client_msgs` to B -- query latest height `ha` of A and wait for `ha > h` (Rust TODO) -- query connection with proof at `h` on chain A and if it is not in proper state then continue with the next event -```rust - let query_response = ibc_query_connection_with_proof(chainA, connAtoB, h); - if query_response.connection.state != "INIT" { - continue; - } - let connection_a = query_response.connection; - let proof_init = query_response.proof; - let proof_height = query_response.proof_height; - assert(proof_height == h); -``` -- query the consensus state stored by client `clB` on A -```rust - let consensus_response = ibc_query_consensus_with_proof(chainA, connection_a.client_id); - let proof_consensus = consensus_response.proof; - let consensus_height = consensus_response.proof_height; -``` -- create the `MsgConnectionOpenTry` message with the information collected above. 
-```rust -let try_msg = MsgConnectionOpenTry { - connection_id: connBtoA, - client_id: clA, - counterparty: Counterparty{ - client_id: connection_a.client_id, - connection_id: connAtoB, - prefix: config(A).store_prefix, - } - proof_init, - proof_consensus, - proof_height, - consensus_height, - signer: config.B.Signer(), -} -``` -- send `try_msg` to B - -When `MsgConnectionOpenTry` is processed on B, the message handler: -- checks that `consensus_height` is valid (smaller or equal than chain B's current height) and within trusting period, -- client `clA` verifies `proof_consensus` for B's consensus state at `consensus_height` and -- client `clA` verifies `proof_init` for the `ConnectionEnd`object that B expects to be present on A at `proof_height`. -The relayer may also perform these verifications before submitting the transaction. - -##### MsgConnectionOpenAck -(WIP) - needs to be updated with correct query sequence - -`MsgConnectionOpenAck` defines the message sent by the relayer to chain A to acknowledge the change of connection state to `TRYOPEN` on Chain B. - -```rust -pub struct MsgConnectionOpenAck { - pub connection_id: ConnectionId, // connAtoB - pub proof_try: CommitmentProof, // proof that connBtoA on Chain B is in TRYOPEN state - pub proof_consensus: CommitmentProof, // proof that on B at proof_height (hB), the A client has - // stored A's consensus state at consensus_height (hA) - pub proof_height: Height, // hB, height of B at which relayer retrieved proof_try - pub consensus_height: Height, // hA - pub versions: , - pub signer: AccAddress, -} -``` -The comments show the values of the fields for the diagram above. -Note: -- `proof_height` is the height of chain B when relayer created the `proof_try`, hB in the diagram. 
-- `consensus_height` is the latest height of chain A that chain B has stored in its client `clA` at the time the relayer queried that client, `hA` in the diagram - -The relayer creates a `MsgConnectionOpenAck` for the B->A relay path when an IBC event notification is received or when chain B is scanned. The steps are: -- let `connBtoA` be the connection identifier on B -- query connection with proof on chain B and if it is not in proper state then continue with next event -```rust - let query_response = ibc_query_connection_with_proof(chainB, connBtoA); - if query_response.connection.state != "TRYOPEN" { - continue; - } - let connection_b = query_response.connection; - let proof_try = query_response.proof; - let proof_height := query_response.proof_height; -``` -- query connection on chain A and validate its state: -```rust - let connAtoB = connection_b.counterparty.connection_id; - let connection_a = ibc_query_connection(chainA, connAtoB); - if connection_a.state != "INIT" && connection_a.state != "TRYOPEN" { - continue; - } -``` -- create `UpdateClientMsg` for `clB` on chain A if required (i.e. 
if `proof_height` is higher than latest height of `clB` on A) -```rust - let client_msg = MsgUpdateClient::new(connection_a.client_id, header, signer); -``` -- query the consensus state stored by client `clA` on B: -```rust - let consensus_response = ibc_query_consensus_with_proof(chainB, connection_b.client_id); - let proof_consensus = consensus_response.proof; - let consensus_height = consensus_response.proof_height; -``` -- create the `MsgConnectionOpenAck` message with the information collected above -```rust -let ack_msg = MsgConnectionOpenAck { - connection_id: connAtoB, - proof_try, - proof_consensus, - proof_height, - consensus_height, - signer: config.A.Signer(), -} -``` -- send `client_msg` and `ack_msg` in a transaction to A - -##### MsgConnectionOpenConfirm -(WIP) - needs to be updated with correct query sequence - -`MsgConnectionOpenConfirm` defines the message sent by the relayer to chain B to confirm the opening of a connection on chain A. - -```rust -pub struct MsgConnectionOpenConfirm { - pub connection_id: ConnectionId, // connBtoA - pub proof_confirm: CommitmentProof,// proof that connAtoB on chain A is in OPEN state - pub proof_height: Height, // hA, height of A at which relayer retrieved the proof_confirm - pub signer: AccAddress, -} -``` - -The relayer creates a `MsgConnectionOpenConfirm` for the A->B relay path when an IBC event notification is received or when chain A is scanned. 
The steps are: -- let `connAtoB` be the connection identifier on A -- query connection with proof on chain A and if it is not in proper state then continue with next event -```rust - let query_response = ibc_query_connection_with_proof(chainA, connAtoB); - if query_response.connection.state != "OPEN" { - continue; - } - let connection_a = query_response.connection; - let proof_confirm = query_response.proof; - let proof_height = query_response.proof_height; -``` -- query connection on chain B and validate its state: -```rust - let connBtoA = connection_a.counterparty.connection_id; - let connection_b = ibc_query_connection(chainB, connBtoA); - if connection_b.state != "INIT" && connection_b.state != "TRYOPEN" { - continue; - } -``` -- create `UpdateClientMsg` for `clA` on chain B if required (i.e. if `proof_height` is higher than latest height of `clA` on B) -```rust - let client_msg = MsgUpdateClient::new(connection_b.client_id, header, config.B.Signer()); -``` -- create the `MsgConnectionOpenConfirm` message with the information collected above -```rust -let confirm_msg = MsgConnectionOpenConfirm { - connection_id: connBtoA, - proof_confirm, - proof_height, - signer: config.B.Signer(), -} -``` -- send `client_msg` and `confirm_msg` in a transaction to B - -#### Channels -(WIP) -The channel handshake messages are relayed in a similar way as the connection ones. In addition, checks on the state of the underlying connection are performed. - -#### Packet, Timeouts and Acknowledgments -(WIP) -Application packets are not stored in the chain state, only a cryptographic commitment is stored. -The relayer has to query the chain's logging system to get the packet data for a given source port and channel. -The result of the query includes among others: - - the source port and channel identifiers - - the sequence number -These are used to create the packet's commitment path which is then used in a state query to get the packet commitment. 
- -## Inter-relayer Coordination -Multiple relayers may run in parallel and, while it is expected that they relay over disjoint paths, it could be the case that they may submit the same transactions to a chain. In this case only the first transaction succeeds while subsequent ones fail, causing loss of fees. Ideally some coordination would be in place to avoid this but this is out of scope of this document. - -## Relayer Restarts and Upgrades - -## Decision - -> This section explains all of the details of the proposed solution, including implementation details. -It should also describe effects / corollary items that may need to be changed as a part of this. -If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. -(e.g. the optimal split of things to do between separate PR's) - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. - -### Positive - -### Negative - -### Neutral - -### Appendix A -The IBC Events, input to the relay thread are described here. 
- -``` -{"create_client": { - "client_id": , - "client_type": , - } -} - -{"update_client": { - "client_id": , - "client_type": , - } -} - -{"connection_open_init": { - "connection_id": , - "client_id": , - "counterparty_connection_id": , - "counterparty_client_id": , - } -} - -{}"connection_open_try": { - "connection_id": , - "client_id": , - "counterparty_connection_id": , - "counterparty_client_id": , - } -} - -{"connection_open_ack": { - "connection_id": , - } -} - -{"connection_open_confirm": { - "connection_id": , - } -} - -{"channel_open_init": { - "port_id": , - "channel_id": , - "counterparty_port_id": , - "counterparty_channel_id": , - "connection_id": , - } -} - -{"channel_open_try": { - "port_id": , - "channel_id": , - "counterparty_port_id": , - "counterparty_channel_id": , - "connection_id": , - } -} - -{"channel_open_ack": { - "port_id": , - "channel_id": , - } -} - -{"channel_open_confirm": { - "port_id": , - "channel_id": , - } -} - -{"channel_close_init": { - "port_id": , - "channel_id": , - } -} - -{"channel_close_confirm": { - "port_id": , - "channel_id": , - } -} - -{"send_packet": { - "packet_data": String, - "packet_timeout_height": String, - "packet_timeout_timestamp": String, - "packet_sequence": String, - "packet_src_port": , - "packet_src_channel": , - "packet_dst_port": , - "packet_dst_channel": , -} - -{"recv_packet": { - "packet_data": String, - "packet_ack": String, - "packet_timeout_height": String, - "packet_timeout_timestamp": String, - "packet_sequence": String, - "packet_src_port": , - "packet_src_channel": , - "packet_dst_port": , - "packet_dst_channel": , -} -``` - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referrenced for why we made the given design choice? If so link them here! 
- -* {reference link} diff --git a/docs/architecture/adr-003-handler-implementation.md b/docs/architecture/adr-003-handler-implementation.md deleted file mode 100644 index 5876c81fc0..0000000000 --- a/docs/architecture/adr-003-handler-implementation.md +++ /dev/null @@ -1,635 +0,0 @@ -# ADR 003: IBC handlers implementation - -## Changelog -* 2020-08-06: Initial proposal -* 2020-08-10: Rename Handler to Message Processor -* 2020-08-14: Revamp definition of chain-specific messages, readers and keepers -* 2021-12-29: Consolidate ADR with the implementation. - -## Context - -In this ADR, we provide recommendations for implementing the IBC -handlers within the `ibc` (modules) crate. - -## Decision - -Concepts are introduced in the order given by a topological sort of their dependencies on each other. - -### Events - -IBC handlers must be able to emit events which will then be broadcasted via the node's pub/sub mechanism, -and eventually picked up by the IBC relayer. - -An event has an arbitrary structure, depending on the handler that produces it. -Here is the [list of all IBC-related events][events], as seen by the relayer. -Note that the consumer of these events in production would not be the relayer directly -(instead the consumer is the node/SDK where the IBC module executes), -but nevertheless handlers will reuse these event definitions. - -[events]: https://github.com/informalsystems/ibc-rs/blob/bf84a73ef7b3d5e9a434c9af96165997382dcc9d/modules/src/events.rs#L15-L43 - -```rust -pub enum IBCEvent { - NewBlock(NewBlock), - - CreateClient(ClientEvents::CreateClient), - UpdateClient(ClientEvents::UpdateClient), - ClientMisbehavior(ClientEvents::ClientMisbehavior), - - OpenInitConnection(ConnectionEvents::OpenInit), - OpenTryConnection(ConnectionEvents::OpenTry), - // ... -} -``` - -### Logging - -IBC handlers must be able to log information for introspectability and ease of debugging. 
-A handler can output multiple log records, which are expressed as a pair of a status and a -log line. The interface for emitting log records is described in the next section. - -```rust -pub enum LogStatus { - Success, - Info, - Warning, - Error, -} - -pub struct Log { - status: LogStatus, - body: String, -} - -impl Log { - fn success(msg: impl Display) -> Self; - fn info(msg: impl Display) -> Self; - fn warning(msg: impl Display) -> Self; - fn error(msg: impl Display) -> Self; -} -``` - -### Handler output - -IBC handlers must be able to return arbitrary data, together with events and log records, as described above. -As a handler may fail, it is necessary to keep track of errors. - -To this end, we introduce a type for the return value of a handler: - -```rust -pub type HandlerResult = Result, E>; - -pub struct HandlerOutput { - pub result: T, - pub log: Vec, - pub events: Vec, -} -``` - -We introduce a builder interface to be used within the handler implementation to incrementally build a `HandlerOutput` value. - -```rust -impl HandlerOutput { - pub fn builder() -> HandlerOutputBuilder { - HandlerOutputBuilder::new() - } -} - -pub struct HandlerOutputBuilder { - log: Vec, - events: Vec, - marker: PhantomData, -} - -impl HandlerOutputBuilder { - pub fn log(&mut self, log: impl Into); - pub fn emit(&mut self, event: impl Into); - pub fn with_result(self, result: T) -> HandlerOutput; -} -``` - -We provide below an example usage of the builder API: - -```rust -fn some_ibc_handler() -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // ... - - output.log(Log::info("did something")) - - // ... - - output.log(Log::success("all good")); - output.emit(SomeEvent::AllGood); - - Ok(output.with_result(42)); -} -``` - -### IBC Submodule - -The various IBC messages and their processing logic, as described in the IBC specification, -are split into a collection of submodules, each pertaining to a specific aspect of -the IBC protocol, eg. 
client lifecycle management, connection lifecycle management, -packet relay, etc. - -In this section we propose a general approach to implement the handlers for a submodule. -As a running example we will use a dummy submodule that deals with connections, which should not -be mistaken for the actual ICS 003 Connection submodule. - -#### Reader - -A typical handler will need to read data from the chain state at the current height, -via the private and provable stores. - -To avoid coupling between the handler interface and the store API, we introduce an interface -for accessing this data. This interface, called a `Reader`, is shared between all handlers -in a submodule, as those typically access the same data. - -Having a high-level interface for this purpose helps avoiding coupling which makes -writing unit tests for the handlers easier, as one does not need to provide a concrete -store, or to mock one. - -```rust -pub trait ConnectionReader -{ - fn connection_end(&self, connection_id: &ConnectionId) -> Option; -} -``` - -A production implementation of this `Reader` would hold references to both the private and provable -store at the current height where the handler executes, but we omit the actual implementation as -the store interfaces are yet to be defined, as is the general IBC top-level module machinery. - -A mock implementation of the `ConnectionReader` trait could look as follows: - -```rust -struct MockConnectionReader { - connection_id: ConnectionId, - connection_end: Option, - client_reader: MockClientReader, -} - -impl ConnectionReader for MockConnectionReader { - fn connection_end(&self, connection_id: &ConnectionId) -> Option { - if connection_id == &self.connection_id { - self.connection_end.clone() - } else { - None - } - } -} -``` - -#### Keeper - -Once a handler executes successfully, some data will typically need to be persisted in the chain state -via the private/provable store interfaces. 
In the same vein as for the reader defined in the previous section, -a submodule should define a trait which provides operations to persist such data. -The same considerations w.r.t. to coupling and unit-testing apply here as well. - -```rust -pub trait ConnectionKeeper { - fn store_connection( - &mut self, - client_id: ConnectionId, - client_type: ConnectionType, - ) -> Result<(), Error>; - - fn add_connection_to_client( - &mut self, - client_id: ClientId, - connection_id: ConnectionId, - ) -> Result<(), Error>; -} -``` - -#### Submodule implementation - -We now come to the actual definition of a handler for a submodule. - -We recommend each handler to be defined within its own Rust module, named -after the handler itself. For example, the "Create Client" handler of ICS 002 would -be defined in `modules::ics02_client::handler::create_client`. - -##### Message type - -Each handler must define a datatype which represent the message it can process. - -```rust -pub struct MsgConnectionOpenInit { - connection_id: ConnectionId, - client_id: ClientId, - counterparty: Counterparty, -} -``` - -##### Handler implementation - -In this section we provide guidelines for implementing an actual handler. - -We divide the handler in two parts: processing and persistence. - -###### Processing - -The actual logic of the handler is expressed as a pure function, typically named -`process`, which takes as arguments a `Reader` and the corresponding message, and returns -a `HandlerOutput`, where `T` is a concrete datatype and `E` is an error type which defines -all potential errors yielded by the handlers of the current submodule. - -```rust -pub struct ConnectionMsgProcessingResult { - connection_id: ConnectionId, - connection_end: ConnectionEnd, -} -``` - -The `process` function will typically read data via the `Reader`, perform checks and validation, construct new -datatypes, emit log records and events, and eventually return some data together with objects to be persisted. 
- -To this end, this `process` function will create and manipulate a `HandlerOutput` value like described in -the corresponding section. - -```rust -pub fn process( - reader: &dyn ConnectionReader, - msg: MsgConnectionOpenInit, -) -> HandlerResult -{ - let mut output = HandlerOutput::builder(); - - let MsgConnectionOpenInit { connection_id, client_id, counterparty, } = msg; - - if reader.connection_end(&connection_id).is_some() { - return Err(Kind::ConnectionAlreadyExists(connection_id).into()); - } - - output.log("success: no connection state found"); - - if reader.client_reader.client_state(&client_id).is_none() { - return Err(Kind::ClientForConnectionMissing(client_id).into()); - } - - output.log("success: client found"); - - output.emit(IBCEvent::ConnectionOpenInit(connection_id.clone())); - - Ok(output.with_result(ConnectionMsgProcessingResult { - connection_id, - client_id, - counterparty, - })) -} -``` - -###### Persistence - -If the `process` function specified above succeeds, the result value it yielded is then -passed to a function named `keep`, which is responsible for persisting the objects constructed -by the processing function. This `keep` function takes the submodule's `Keeper` and the result -type defined above, and performs side-effecting calls to the keeper's methods to persist the result. - -Below is given an implementation of the `keep` function for the "Create Connection" handlers: - -```rust -pub fn keep( - keeper: &mut dyn ConnectionKeeper, - result: ConnectionMsgProcessingResult, -) -> Result<(), Error> -{ - keeper.store_connection(result.connection_id.clone(), result.connection_end)?; - keeper.add_connection_to_client(result.client_id, result.connection_id)?; - - Ok(()) -} -``` - -##### Submodule dispatcher - -> This section is very much a work in progress, as further investigation into what -> a production-ready implementation of the `ctx` parameter of the top-level dispatcher -> is required. 
As such, implementors should feel free to disregard the recommendations -> below, and are encouraged to come up with amendments to this ADR to better capture -> the actual requirements. - -Each submodule is responsible for dispatching the messages it is given to the appropriate -message processing function and, if successful, pass the resulting data to the persistence -function defined in the previous section. - -To this end, the submodule should define an enumeration of all messages, in order -for the top-level submodule dispatcher to forward them to the appropriate processor. -Such a definition for the ICS 003 Connection submodule is given below. - -```rust -pub enum ConnectionMsg { - ConnectionOpenInit(MsgConnectionOpenInit), - ConnectionOpenTry(MsgConnectionOpenTry), - ... -} -``` -The actual implementation of a submodule dispatcher is quite straightforward and unlikely to vary -much in substance between submodules. We give an implementation for the ICS 003 Connection module below. - -```rust -pub fn dispatch(ctx: &mut Ctx, msg: Msg) -> Result, Error> -where - Ctx: ConnectionReader + ConnectionKeeper, -{ - match msg { - Msg::ConnectionOpenInit(msg) => { - let HandlerOutput { - result, - log, - events, - } = connection_open_init::process(ctx, msg)?; - - connection::keep(ctx, result)?; - - Ok(HandlerOutput::builder() - .with_log(log) - .with_events(events) - .with_result(())) - } - - Msg::ConnectionOpenTry(msg) => // omitted - } -} -``` - -In essence, a top-level dispatcher is a function of a message wrapped in the enumeration introduced above, -and a "context" which implements both the `Reader` and `Keeper` interfaces. - -### Dealing with chain-specific datatypes - -The ICS 002 Client submodule stands out from the other submodules as it needs -to deal with chain-specific datatypes, such as `Header`, `ClientState`, and -`ConsensusState`. 
- -To abstract over chain-specific datatypes, we introduce a trait which specifies -both which types we need to abstract over, and their interface. - -For the ICS 002 Client submodule, this trait looks as follow: - -```rust -pub trait ClientDef { - type Header: Header; - type ClientState: ClientState; - type ConsensusState: ConsensusState; -} -``` - -The `ClientDef` trait specifies three datatypes, and their corresponding interface, which is provided -via a trait defined in the same submodule. - -A production implementation of this interface would instantiate these types with the concrete -types used by the chain, eg. Tendermint datatypes. Each concrete datatype must be provided -with a `From` instance to lift it into its corresponding `Any...` enumeration. - -For the purpose of unit-testing, a mock implementation of the `ClientDef` trait could look as follows: - -```rust -struct MockHeader(u32); - -impl Header for MockHeader { - // omitted -} - -impl From for AnyHeader { - fn from(mh: MockHeader) -> Self { - Self::Mock(mh) - } -} - -struct MockClientState(u32); - -impl ClientState for MockClientState { - // omitted -} - -impl From for AnyClientState { - fn from(mcs: MockClientState) -> Self { - Self::Mock(mcs) - } -} - -struct MockConsensusState(u32); - -impl ConsensusState for MockConsensusState { - // omitted -} - -impl From for AnyConsensusState { - fn from(mcs: MockConsensusState) -> Self { - Self::Mock(mcs) - } -} - -struct MockClient; - -impl ClientDef for MockClient { - type Header = MockHeader; - type ClientState = MockClientState; - type ConsensusState = MockConsensusState; -} -``` - -Since the actual type of client can only be determined at runtime, we cannot encode -the type of client within the message itself. - -Because of some limitations of the Rust type system, namely the lack of proper support -for existential types, it is currently impossible to define `Reader` and `Keeper` traits -which are agnostic to the actual type of client being used. 
- -We could alternatively model all chain-specific datatypes as boxed trait objects (`Box`), -but this approach runs into a lot of limitations of trait objects, such as the inability to easily -require such trait objects to be Clonable, or Serializable, or to define an equality relation on them. -Some support for such functionality can be found in third-party libraries, but the overall experience -for the developer is too subpar. - -We thus settle on a different strategy: lifting chain-specific data into an `enum` over all -possible chain types. - -For example, to model a chain-specific `Header` type, we would define an enumeration in the following -way: - -```rust -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] // TODO: Add Eq -pub enum AnyHeader { - Mock(mocks::MockHeader), - Tendermint(tendermint::header::Header), -} - -impl Header for AnyHeader { - fn height(&self) -> Height { - match self { - Self::Mock(header) => header.height(), - Self::Tendermint(header) => header.height(), - } - } - - fn client_type(&self) -> ClientType { - match self { - Self::Mock(header) => header.client_type(), - Self::Tendermint(header) => header.client_type(), - } - } -} -``` - -This enumeration dispatches method calls to the underlying datatype at runtime, while -hiding the latter, and is thus akin to a proper existential type without running -into any limitations of the Rust type system (`impl Header` bounds not being allowed -everywhere, `Header` not being able to be treated as a trait objects because of `Clone`, -`PartialEq` and `Serialize`, `Deserialize` bounds, etc.) - -Other chain-specific datatypes, such as `ClientState` and `ConsensusState` require their own -enumeration over all possible implementations. 
- -On top of that, we also need to lift the specific client definitions (`ClientDef` instances), -into their own enumeration, as follows: - -```rust -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { - Mock(mocks::MockClient), - Tendermint(tendermint::TendermintClient), -} - -impl ClientDef for AnyClient { - type Header = AnyHeader; - type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; -} -``` - -Messages can now be defined generically over the `ClientDef` instance: - - -```rust -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct MsgCreateClient { - pub client_id: ClientId, - pub client_type: ClientType, - pub consensus_state: CD::ConsensusState, -} - -pub struct MsgUpdateClient { - pub client_id: ClientId, - pub header: CD::Header, -} -``` - -The `Keeper` and `Reader` traits are defined for any client: - -```rust -pub trait ClientReader { - fn client_type(&self, client_id: &ClientId) -> Option; - fn client_state(&self, client_id: &ClientId) -> Option; - fn consensus_state(&self, client_id: &ClientId, height: Height) -> Option; -} - -pub trait ClientKeeper { - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Error>; - - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Error>; - - fn store_consensus_state( - &mut self, - client_id: ClientId, - consensus_state: AnyConsensusState, - ) -> Result<(), Error>; -} -``` - -This way, only one implementation of the `ClientReader` and `ClientKeeper` trait is required, -as it can delegate eg. the serialization of the underlying datatypes to the `Serialize` bound -of the `Any...` wrappper. 
- -Both the `process` and `keep` function are defined to take a message generic over -the actual client type: - -```rust -pub fn process( - ctx: &dyn ClientReader, - msg: MsgCreateClient, -) -> HandlerResult, Error>; - -pub fn keep( - keeper: &mut dyn ClientKeeper, - result: CreateClientResult, -) -> Result<(), Error>; -``` - -Same for the top-level dispatcher: - -```rust -pub fn dispatch(ctx: &mut Ctx, msg: ClientMsg) -> Result, Error> -where - Ctx: ClientReader + ClientKeeper; -``` - -With this boilerplate out of way, one can write tests using a mock client, and associated mock datatypes -in a fairly straightforward way, taking advantage of the `From` instance to lift concerete mock datatypes -into the `Any...` enumeration: - -```rust - #[test] - fn test_create_client_ok() { - let client_id: ClientId = "mockclient".parse().unwrap(); - - let reader = MockClientReader { - client_id: client_id.clone(), - client_type: None, - client_state: None, - consensus_state: None, - }; - - let msg = MsgCreateClient { - client_id, - client_type: ClientType::Tendermint, - consensus_state: MockConsensusState(42).into(), // lift into `AnyConsensusState` - }; - - let output = process(&reader, msg.clone()); - - match output { - Ok(HandlerOutput { - result, - events, - log, - }) => { - // snip - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } -``` - -## Status - -Proposed - -## Consequences - -### Positive -- clear separation of message handlers logic (processing and persistence logic) from the store -- provide support to mock the context of a handler and test the handler functionality in isolation - - -### Negative -- data type system around submodule ICS02 is relatively complex - -### Neutral - -## References \ No newline at end of file diff --git a/docs/architecture/adr-004-relayer-domain-decomposition.md b/docs/architecture/adr-004-relayer-domain-decomposition.md deleted file mode 100644 index 23a7d8d39c..0000000000 --- 
a/docs/architecture/adr-004-relayer-domain-decomposition.md +++ /dev/null @@ -1,303 +0,0 @@ -# ADR 004: Relayer Domain Decomposition - -## Changelog -* 21.7.2020: Initial sketch -* 27.7.2020: Dependencies outline -* 5.10.2020: Update based on sketch -* 2.11.2020: Reviewed & accepted - -## Context - -The IBC handlers queries and relayer are defined loosely in the [specs]. -The goal of this ADR is to provide clarity around the basic domain objects -from the perspective of the relayer, -their interfaces as well as dependencies between them in order to -guide the implementation. The success criteria for the decomposition is -how well it can be tested. It's expected that any decomposition will -lend itself to tight unit tests allowing more collaborators make change -in the code base with confidence. - -## Decision -The decomposition should be motivated by what we want to test and what -we need to mock out to exercise the core logic. - -We want to be able to test the following high-level functions: - -* Client create and update - * With different chain states -* Connection handshake - * With different chain states -* Channel Setup - * With different chain states -* Datagram Construction - * With different chain states -* Datagram to Transaction - * Batching - * Signing -* Datagram Submission - * With different chain states - * Missing Client Updates - * With Missing Proofs -* Handlers (datagrams, chain state) -> events - * Handling the batch of datagrams - * With different chain states - * Specifically, the key value store - * Produce events - -## Dependencies - -In this section, we map the operations which need to be performed at different -stages of both the relayer and the IBC handlers. This gives an outline -of what low-level operations and dependencies need to be mocked out to test each -stage in isolation, and will inform the design of the various traits needed -to mock those out. 
- -Not all stages are listed here because the operations and dependencies -outlined below cover all the possible dependencies at each stage. - -### Initializing a connection from the relayer - -- Need a relayer configuration (relayer.toml) -- Query chain B for its commitment prefix (ABCI query) -- Send `MsgConnectionOpenInit` message to chain A (transaction) - -### `ConnOpenInit` (Handler) - -- Provable store -- Private store - -### `updateIBCClient` (Relayer) - -- get the latest height from chain A (Query) -- get client consensus state from chain B (Query) -- get latest header + minimal set from chain A (Light Client) -- verify client state proof (Prover) -- create and submit datagrams to update B's view of A (Message Builder, Transaction) -- replace full node for B with other full node (PeerList) -- create and submit proof of fork (Fork Evidence Reporter) -- wait for UpdateClient event (Event Subscription) - -### `pendingDatagrams` (Relayer) -Builds the datagrams required by the given on-chain states. -For connection datagrams: -- get connection objects from chain A (Query) -- get connection objects from chain B (Query) -- get proof\* of connection state (e.g. `Init`) from chain A (Query, Prover, Light Client) -- get proof\* of client state and consensus state from chain A (Query, Prover, Light Client) - - \* involves querying the chain + get header/minimal set + verify proof -- build the next message in the connection handshake, e.g. `ConnOpenTry` (Message Builder) - -Channel datagrams are built similarly. Packet datagrams are triggered by events, and they are detailed in the Link section below. - -### IBC Module - -For every a transaction in a block of height H: - -- call appropriate handler (this is realized by ICS26 routing sub-module), -- If handler succeeds (transaction does not abort), then - apply the updates to the key-value store (provable & private), and also - get the current height H and emit appropriate events. 
- -## Objects - -The main domain objects in the relayer (`ForeignClient`, `Connection`, `Channel`) -will be created as concrete types which contain their configuration. -These objects are the relayer's representation of different parts of state from the two chains. -Dependencies between types indicate runtime dependencies of the chain -state. For instance, objects parameterized by a `ForeignClient` type, such as a `Connection`, -have the pre-condition that the runtime completed client creation before operating with -those objects. - -### ChainHandle - -The ChainHandle trait is a local representation of a chain state on the relayer. -The API of a ChainHandle provides reliable access to chain state whether -crashed, constructed or queried. We envision a mock version of a chain -will be used to test both handler and relayer logic ([#158]). - -The method set of the ChainHandle trait will reflect specific needs and not -intermediate representations. Query and light client verification -concerns will be internal to the chain handle implementation and not exposed -via this API. Users of a ChainHandle implementation (i.e., relayer methods) -can assume verified data or receive an Error and act appropriately. - -```rust -trait ChainHandle { - // Generate a packet - fn create_packet(&self, event: IBCEvent) -> Result; - - // Fetch the height of an IBC client hosted by a chain - // - query the consensus_state of src on dst - // - query the highest consensus_state - // - verify if with the light client - // - return the height - fn get_height(&self, client: &ForeignClient) -> Result; - - // Submit a transaction to a chains underlying full node - fn submit(&self, transaction: EncodedTransaction) -> Result<(), ChainError>; - - // ... 
-} - -``` - -### Connection - -```rust -impl Connection { - fn new( - chain_a: &ChainHandle, - chain_b: &ChainHandle, - foreign_client_a: &ForeignClient, - foreign_client_b: &ForeignClient, - config: ConnectionConfig) - -> Result { - // Establish a connection between ChainA and ChainB via ICS 3 handshake. - // For a first version this can be completely synchronous (a blocking call). - } -} -``` - -### Channel -```rust -impl Channel { - fn new( - chain_a: &ChainHandle, - chain_b: &ChainHandle, - connection: &Connection, - config: ChannelConfig) - -> Result { - // Establish a channel between two modules (i.e., ICS4 handshake). - } -} -``` - -## Link - -A link is the object that connects two specific modules on separate chains. -Links are responsible for relaying packets from `chain_a` -to `chain_b` and are therefore uni-directional. A single relayer process -should be able to support multiple link instances and each link should -run in its own thread. Links depend on `ForeignClient`s, -`Connection` and `Channel`. - -```rust -struct Link { - src_chain: &ChainHandle, - dst_chain: &ChainHandle, - channel: &Channel, -} - -impl Link { - fn new(channel: &Channel, config: LinkConfig) - -> Link { - // ... - } - - /// Relay Link specific packets from src_chain to dst_chain - /// Expect this to run in a thread - fn run(self) -> Error { - let subscription = self.src_chain.subscribe(&self.channel); - - for (target_height, events) in subscription.iter() { - // ... 
- - let datagrams = events.map(|event| { - Datagram::Packet(self.src_chain.build_packet(target_height, event)) - }); - - for attempt in self.config_submission_attempts { - let current_height = self.dst_chain.get_height(&self.connection.channel.foreign_client)?; - let signed_headers = self.src_chain.get_minimal_set(current_height, target_height)?; - - let mut attempt_datagrams = datagrams.clone(); - attempt_datagrams.push(Datagram::ClientUpdate(ClientUpdate::new(signed_headers))); - - let transaction = Transaction::new(attempt_datagrams); - self.dst_chain.submit(transaction.sign().encode())?; - } - - } - } -} -``` - -### Example Main - -Example of initializing a single link between two chains. Each -chain has its own runtime and exposes a `handle` to communicate with -that runtime from different threads. There are dependencies between -ForeignClients, Connections, Channels and Links which are encoded in the -type system. The construction of them reflects that their corresponding -handshake protocol has completed successfully. - -```rust -fn main() -> Result<(), Box> { - let src_chain = ChainRuntime::new(); - let dst_chain = ChainRuntime::new(); - - /// chains expose handlers for communicating with the chain related runtime - /// which move into their own threads - let src_chain_handle = src_chain.handle(); - thread::spawn(move || { - src_chain.run().unwrap(); - }); - - let dst_chain_handle = dst_chain.handle(); - thread::spawn(move || { - // What should we do on return here? 
- dst_chain.run().unwrap(); - }); - - let src_foreign_client_on_dst = ForeignClient::new( - &src_chain_handle, - &dst_chain_handle)?; - - let dst_foreign_client_on_src = ForeignClient::new( - &src_chain_handle, - &dst_chain_handle)?; - - let connection = Connection::new( - &src_chain_handle, - &dst_chain_handle, - dst_foreign_client_on_src, - src_foreign_client_on_dst, - ConnectionConfig::default()).unwrap(); - - let channel = Channel::new( - &src_chain_handle, - &dst_chain_handle, - connection, - ChannelConfig::default()).unwrap(); - - let link = Link::new( - src_chain_handle, - dst_chain_handle, - channel, - LinkConfig::default())?; - - link.run()?; - - Ok(()) -} -``` - -## Status - -- Accepted (first implementation in [#335](https://github.com/informalsystems/ibc-rs/pull/335)). - -## Consequences - -### Positive -* Clean abstractions an isolation from IO -* Handshakes are correct by construction -* Sane error handling - -### Negative - -### Neutral - -## References - -[specs]: https://github.com/cosmos/ibc/tree/master/spec -[#158]: https://github.com/informalsystems/ibc-rs/issues/158 diff --git a/docs/architecture/adr-005-relayer-v0-implementation.md b/docs/architecture/adr-005-relayer-v0-implementation.md deleted file mode 100644 index e15ad3783e..0000000000 --- a/docs/architecture/adr-005-relayer-v0-implementation.md +++ /dev/null @@ -1,234 +0,0 @@ -# ADR 005: Relayer v0.1 implementation - -## Changelog - -* 04.01.2020: First draft proposed. -* 09.02.2020: Revised, fixed todos, reviewed. - -## Context - -This ADR documents the implementation of the `v0.1` [relayer lib crate] -[ibc-relayer]. -This library is instantiated in the [Hermes][hermes] binary of the -[ibc-relayer-cli crate][ibc-relayer-cli] (which is not the focus of this discussion). - -As a main design goal, `v0.1` is meant to lay a foundation upon which we can -add more features and enhancements incrementally with later relayer versions. 
-This is to say that `v0.1` may be deficient in terms of features or -robustness, and rather aims to be simple, adaptable, and extensible. -For this reason, we primarily discuss aspects of concurrency and architecture. - - -### Relayer versioning scheme - -On the mid-term, the relayer architecture is set out to evolve across three -versions. - -The first of these, `v0.1`, makes several simplifying assumptions -about the environment of the relayer and its features. These assumptions -are important towards limiting the scope that `v0.1` aims to -cover, and allowing a focus on the architecture and concurrency model to -provide for growth in the future. - -These assumptions are documented below in the [decision](#decision) section. - -## Decision - - -### Configuration - -For the most part, the relayer configuration will be -static: the configuration for chains and their respective objects (clients, -connections, or channels) will be fully specified in the relayer -configuration file and will not change throughout execution. -Light clients are also statically defined in the config file, and cannot be -switched dynamically at runtime. - -Recent changes to the ICS protocol specifies identifier -selection for clients, connections, and channels to be [deterministic][ids]. -For this reason, we will not need to specify any identifiers in the -configuration file. -We only specify which pairs of chains should communicate with one -another, and the port identifier to use for that purpose. -This pair of chains plus their corresponding port identifiers is called a -__relaying path__. -Any relaying path is unidirectional. - -An example with the relevant section of the configuration file follows. 
- -```toml -[[connections]] -a_chain = 'ibc-0' -b_chain = 'ibc-1' - -[[connections.paths]] -a_port = 'transfer' -b_port = 'transfer' -``` - -Here there are two chains, with one connection between them, and a path for -relaying on the port called `transfer` on both chains, from chain `ibc-0` -to `ibc-1`. - -### Links - -A [link][link] is a relayer-level protocol that implements packet relay across -one relaying path. -The relayer at `v0.1` will focus on a single link. -This limitation will be lifted in subsequent versions. - -### Chain State - -Each chain is assumed to start with an empty IBC state. -This means that the relayer will take care of creating the client, -connection, and channel objects respectively on each side of a link. - -### Proof Verification - -The `v0.1` relayer will _not_ do proof verification. - -### Feature set - -The [complete list of features is documented elsewhere][features] in detail. - -## Relayer Concurrency Model - -Relayer `v0.1` works under the assumption that there are no competing relayers -running concurrently (which may interfere with each other). -Furthermore, as stated above, the relayer will handle a single link (one -packet relaying direction from a source chain to a destination chain). -The following diagram sketches the relayer domain decomposition at a -high-level, with a focus on one link. - -relayer v0 domain 
-decomposition - - -The relayer supports a single stack made of a connection, a channel, and a link. - -The application thread that runs upon starting creates a link associated -with the relaying path. -It also triggers messages for creating all objects (clients, a connection, -and a channel) underlying this link. -These will cause the relayer to build and send all messages associated with -the handshakes for these objects, plus a retry mechanism. -It should work even if these events are received by the link at the same time -as the live chain IBC events. -In other words, no synchronization with starts of other threads should be -required. - -Besides the application thread, the relayer maintains one or more threads -for each chain. -The number of threads per chain is chain-specific: -- For the production chain [Gaia][gaia] (see also the [References] - (#references) below), there are three separate - threads, described in more detail in the [architecture](#architecture) - section. -- For the mock chain ([Mock](#references)), there is one thread. - -The link runs in the main application thread. This consumes events -from the chains, performs queries and sends transactions synchronously. - - -#### Architecture - -The following diagram provides more detail into how the relayer is -structured. -Here the focus is on the interfaces within the relayer, as well as the -interface between the relayer and a single chain. - -relayer v0 architecture - -##### Legend - -Some of the notation from this figure has the following meaning. - -| Notation | Description | Examples | -| ------ | ----------- | ----------- | -| `E` | Enum: typically messages between threads | `ChainRequest`; `IBCEvent` | -| `S` | Struct: a processing element | `ForeignClient`; `Connection` | -| `T` | Trait: typically interface between threads | `Chain`; `LightClient` | - -##### Levels of abstraction - -At the top of this diagram, there is a chain consisting of multiple full nodes. 
-The deeper (i.e., lower) we go into this sketch, the closer we get to the user, or -Hermes (the relayer CLI). -To understand the relayer architecture intuitively, we can break down the -levels of abstraction as follows: - -###### 1. The actual chain, comprising a number of full nodes -- This is the lowest level of abstraction, the farthest away from relayer - users -- The relayer communicates with a chain via three interfaces: - - (i) the `LightClient` trait (handled via the supervisor for the - production chain), - - (ii) the `Chain` trait (where the communication happens over the - ABCI/gRPC interface primarily), and - - (iii) an `EventMonitor` which subscribes to a full node, and carries batches - of events from that node to the chain runtime in the relayer. Currently, - the relayer registers for `Tx` and `Block` notifications. It then extracts - the IBC events from the `Tx` and generates a `NewBlock` event also for the - block. Note that a notification may include multiple IBC Events. - -###### 2. The chain runtime - -- This is an intermediary layer, sitting between the relayer application and - any chain(s); -- The runtime is universal for all possible chains, i.e., does _not_ contain any - chain-specific code; -- Accepts as input requests from the application (Hermes, the CLI), in the form of - [`ChainRequest`][chain-req] via a crossbeam channel -- Responds to the application via a crossbeam channel -- Has objects which implement the three interfaces named above - (`LightClient`, `Chain`, and `EventMonitor`) and orchestrates access to - these objects as required by application requests - -###### 3. 
The relayer application - -- Communicates with the runtime via a `ChainHandle`, which contains the - appropriate crossbeam sender and receiver channels to/from the runtime -- Upon start-up, instantiates relayer-level objects in the following order: - two `ForeignClient`s (one per chain), a `Connection` (which contains the - two clients), a `Channel` (containing the connection), and on top of that - a `Link`. -- The code here is part of the Hermes (relayer CLI) binary. - -##### Threads - -Each thread in this diagram is a separate box shaded in gray. -There are four threads running: the `EventMonitor`, the `Supervisor`, the -`Runtime`, and the main application thread, called `V0Cmd`. - -## Status - -Accepted - -## Consequences - -### Positive -- prepares the relayer crate for incremental growth - -### Negative - -### Neutral - -## References: - -- __Gaia__: the correct Gaia instance for working with `v0.1` can be obtained - from https://github.com/cosmos/relayer, with `git checkout v4.0.0` by - executing `make build-gaia`. This - [comment](https://github.com/informalsystems/ibc-rs/pull/449#issuecomment-750248113) - provides additional insights into development-time relayer `v0.1` environment. 
- -- __Mock__: https://github.com/informalsystems/ibc-rs/blob/master/relayer/src/chain/mock.rs - - - -[ids]: https://github.com/cosmos/cosmos-sdk/pull/7993 -[link]: https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-004-relayer-domain-decomposition.md#link -[chain-req]: https://github.com/informalsystems/ibc-rs/blob/379dd9812f6e7a42b9428f64eb52fe292d417476/relayer/src/chain/handle.rs#L51 -[ibc-relayer]: https://github.com/informalsystems/ibc-rs/tree/master/relayer/ -[ibc-relayer-cli]: https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli/ -[hermes]: https://hermes.informal.systems -[features]: https://github.com/informalsystems/ibc-rs/blob/v0.1.0/guide/src/feature_matrix.md diff --git a/docs/architecture/adr-006-hermes-v0.2-usecases.md b/docs/architecture/adr-006-hermes-v0.2-usecases.md deleted file mode 100644 index bbbabc5596..0000000000 --- a/docs/architecture/adr-006-hermes-v0.2-usecases.md +++ /dev/null @@ -1,294 +0,0 @@ -# ADR 006: Hermes v0.2.0 Use-Cases - -## Changelog -* 16.02.2021: Proposed. - -## Context - -One major problem with planning for the evolution of Hermes is that presently -there is insufficient clarity regarding its requirements. -It is not known who are the typical Hermes users (is it human operators or -automated pipelines?), and what are their primary use-cases. - -This ADR proposes a few use-cases that seem interesting from the point -of view of a general target base of users, and which will -hopefully be a subset of the requirements of (any) future users. - -Three elements that provide further context for this discussion are: - -1. Hermes is still at an early stage of implementation, so these use-cases are - not set in stone. - -2. Some concrete use-cases are starting to emerge ([#628][#628]), which Hermes - v0.1.0 either does not cover altogether, or covers poorly (e.g., because of - inconsistent UX), thus informing this proposal. - -3. 
Hermes is one of _three_ relayer binaries that are being developed roughly in -parallel. The other two are being developed in Go and Typescript, -respectively (see the [references](#references) section). -In this context, it is plausible that Hermes will focus on performance, -robustness, and richness of features on a longer term. - -## Decision - -This is a summary of the use-cases (commands) discussed in the rest of this ADR. -Note that the commands below omit the binary name `hermes` , to keep the command -length to a minimum. - -To create and update a client: -- `create client ` - - Optional params: `[--clock-drift ] [--trusting-period ] [--trust-threshold ]` -- `update client ` - -To create a connection: -- `create connection ` - - Optional: `[--delay ]` -- `create connection --client-a --client-b ` - - Optional: `[--delay ]` - -To create a channel: -- `create channel --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `create channel --connection-a --port-a --port-b ` - - Optional: `[--order ] [--version ]` - -To start packet relaying: -- `start --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `start --connection-a --port-a --port-b ` - - Optional: `[--order ] [--version ]` -- `start --channel-a --port-a ` - -For finishing pre-initialized, but unfinished object handshakes, for connection and channel: -- `establish connection --connection-a ` -- `establish channel --channel-a --port-a ` - -### Rationale - -The primary goal for the uses-cases we decided to cover is to prevent situations -where users could get stuck. For example, the output of a command may be -unclear, or there may be an error and thereby some CLI command -finishes partially, or two relayers concurrently try to perform some -operation(s) and interfere with each other, resulting in a chain state that is -obscure to the user, and then the user could consequently be stuck. - -The first of the patterns below seeks to help "unblock" a user. 
-The second pattern is a variation on the first; this permits more efficiency -because it allows the reuse of previously-created objects in the -creation of new objects on a chain (e.g., reuse a client in the creation of a -connection, or reuse a connection in the creation of a new channel). - -#### Patterns - -We propose two basic patterns that Hermes should be able to fulfil. - -1. Simple invocations to perform basic actions. - - By _action_ here we mean doing the complete handshake for an object from - scratch (specifically _connection_ or _channel_) on two chains, or - relaying packets between two chains. - - The focus here is for the command to include retrying mechanisms - (perform it _robustly_) and have the simplest interface. - -2. Allow reusing of pre-existing state for basic commands. - - The pre-existing state could be a client with some specific trust options, - for instance, and in this case Hermes would provide support for creating - a connection that uses this specific client. - - This pattern should also include a retrying mechanism. - -#### Details of Use-Cases - -Applying the above patterns to a few cases, we get the following concrete -commands that Hermes v0.2.0 should fulfil. - -##### Create & Update a Client - -- Minimal invocation: this will create the client from scratch: - -``` -create client [--clock-drift ] [--trusting-period ] [--trust-threshold ] -``` - -**Details:** -Submits a transaction of type [client create][client-create] to chain -`` (sometimes called the _destination_ chain of this -transaction). The new client will be verifying headers for -chain `` (often called the _source_ chain). - -See also the [limitations](#limitations) section discussing the optional -security parameters for this command. - -- Update a client: - -``` -update client -``` - -**Details:** -Submits a transaction to chain id `` to update the client having -identifier `` with new consensus state from up-to-date headers. 
-Hermes will automatically infer the target chain of this client from -the [client state][client-state]. - -- Upgrade a client: - -``` -upgrade client -``` - -**Details:** -Submits a transaction to chain id `` to upgrade the client having -identifier ``. -Hermes will automatically infer the target chain of this client from -the [client state][client-state]. - -- Upgrade all clients that target a specific chain: - -``` -upgrade clients -``` - -**Details:** -Submits a transaction to upgrade clients of all chains in the config that target -chain id ``. - -##### Create New Connection - -- Minimal invocation: this will create the connection from scratch, using - _new_ clients: - -``` -create connection [--delay ] -``` - -**Details:** -Starts a transaction to perform the connection open handshake protocol between -two chains. -The chains are called symbolically `a` and `b`, hence the option names -`` and ``. In all handshakes, Hermes submits the first -step (typically called _init_, e.g., `ConnOpenInit`), to side `a`, then the -second step (e.g., `ConnOpenTry`) to side `b`, and so on. - -The optional parameter `--delay` is the delay period that the new connection -should have. Note also the [limitations](#limitations) around the -`delay_period` feature. - -- Reusing pre-existing state, concretely, with _existing_ clients: - -``` -create connection --client-a --client-b [--delay ] -``` - -**Details:** -Similar to the previous command, this command will perform the connection -open handshake protocol, but will reuse the client with identifier from -option `--client-a`. This client is expected to exist on chain ``. -The target chain of this client is identified in the -[client state][client-state] (concretely, the target chain is represented under -`chain_id` field of the client state), which provides the identifier for the -side `b` of the new connection. 
On the side `b` chain, this command will -establish the connection using the client with identifier from the option -`--client-b`, which must be verifying headers for chain ``. - -##### Create New Channel - -- With _new_ connection and clients: - -``` -create channel --port-a --port-b [--order ] [--version ] -``` - -- With _existing_ specific connection: - -``` -create channel --connection-a --port-a --port-b [--order ] [--version ] -``` - -##### Packet Relaying - -- relay packets over a _new_ channel, _new_ connection, and _new_ clients: - -``` -start --port-a --port-b [--order ] [--version ] -``` - -- relay packets over a _new_ channel that re-uses an _existing_ connection: - -``` -start --connection-a --port-a --port-b [--order ] [--version ] -``` - -- relay packets over an _existing_ channel: - -``` -start --channel-a --port-a -``` - -##### Finishing partially complete handshakes: - -These commands serve the purpose of covering certain corner-cases where a -handshake may be partially started. - -- Finalize handshake for _partially established_ connection: - -``` -establish connection --connection-a -``` - -- Finalize handshake for _partially established_ channel: - -``` -establish channel --channel-a --port-a -``` - - -### Command Output - -By default, the command will provide human-readable output, i.e., pretty -printing. -In practice, the final result of a Hermes command is captured in an -[Output][output] structure that has support for JSON serialization. To -enable JSON, we add a configuration parameter `log_json`. The global section -of the config file will look as follows: - -```toml -[global] -log_level = 'error' -log_json = 'false' -``` - -By default, this parameter is `false`. When set to `true`, all the Hermes output -will be in JSON. - -## Status - -Partially implemented. - -## Consequences -### Positive - -- Simpler, more accurate CLI invocation: "create" is more precise than "tx" or - "handshake" -- Improved output for human operators. 
- -### Negative - -- Some commands will possibly turn out to be useless. -- Requires some rethinking of the Relayer architecture (mainly because of the - [limitations](#limitations) surrounding light clients.) - -### Neutral - - -## References - -- Relayer in Go: https://github.com/cosmos/relayer -- Relayer in Typescript: https://github.com/confio/ts-relayer - - - -[#628]: https://github.com/informalsystems/ibc-rs/issues/628 -[#673]: https://github.com/informalsystems/ibc-rs/issues/673 -[#640]: https://github.com/informalsystems/ibc-rs/issues/640 -[client-state]: https://hermes.informal.systems/commands/queries/client.html#query-the-client-state -[client-create]: https://docs.rs/ibc/0.1.1/ibc/ics02_client/msgs/create_client/index.html -[output]: https://github.com/informalsystems/ibc-rs/blob/1f2e72dbcafee5a8bbdab381ff4927d5870b4b59/relayer-cli/src/conclude.rs#L80 diff --git a/docs/architecture/adr-007-error.md b/docs/architecture/adr-007-error.md deleted file mode 100644 index 2faf45cd23..0000000000 --- a/docs/architecture/adr-007-error.md +++ /dev/null @@ -1,228 +0,0 @@ -# ADR 007: Error Management - -## Changelog - -* 2020-07-26: Initial Proposal - -## Context - -This document describes the reason behind the switch from using -`anomaly` for error handling to -the [`flex-error`](https://docs.rs/flex-error/) crate that is developed in-house. - -## Decision - -### Problem Statement - -To keep things brief, we will look at the issue of error handling from a specific example -in `relayer/src/error.rs`: - -```rust -pub type Error = anomaly::Error; - -#[derive(thiserror::Error)] -pub enum Kind { - #[error("GRPC error")] - Grpc, - ... -} - -impl Kind { - pub fn context(self, source: impl Into>) -> anomaly::Context { - Context::new(self, Some(source.into())) - } -} -``` - -The design above is meant to separate between two concerns: - - - The metadata about an error, as captured in `Kind`. - - The trace of how the error occured, as captured in `anomaly::Context`. 
- - The type `Error` is defined to be `anomaly::Error`, which is a newtype wrapper to `Box>`. - -There are a few issues with the original design using `anomaly`: - - - The error source type is erased and turned into a `Box`, making it difficult to recover metadata - information about the original error. - - The `Kind::context` method allows any error type to be used as an error source, making it difficult to statically - analyze which sub-error has what kind of error source. - -We can demonstrate the design issue with a specific use case: - -```rust -pub fn unbonding_period(&self) -> Result { - let mut client = self - .block_on(QueryClient::connect(self.grpc_addr.clone())) - .map_err(|e| Kind::Grpc.context(e))?; - - let request = Request::new(QueryParamsRequest {}); - - let response = self - .block_on(client.params(request)) - .map_err(|e| Kind::Grpc.context(e))?; - ... -} -``` - -Without the help of an IDE, it would be challenging to figure out that -the first use of `Kind::Grpc.context` has `tonic::Status` as the error source -type, while the second use has the error source type -`tonic::TransportError`. - -The mixing up of `tonic::Status` and `tonic::TransportError` as error sources -is not too critical in this specific case. However, this would not be the -case if we want to use the error source information to determine whether -an error is _recoverable_ or not. For instance, if we wanted to -implement custom retry logic only when the error source is -`std::io::Error`, there would be no easy way to distinguish whether an error -variant `Kind::Grpc` is caused by `std::io::Error`. - -### Proposed Design - -A better design is to define error construction functions with _explicit_ -error sources. The proposed design is as follows: - -```rust -pub struct Error(pub ErrorDetail, pub eyre::Report); - -pub enum ErrorDetail { - GrpcStatus { - status: tonic::Status - }, - GrpcTransport, - ... 
-} - -impl Error { - pub fn grpc_status(status: tonic::Status) -> Error { - let detail = ErrorDetail::GrpcStatus { status }; - Error(detail, Eyre::msg(detail)) - } - - pub fn grpc_transport(source: tonic::TransportError) -> Error { - let detail = ErrorDetail::GrpcTransport; - let trace = Eyre::new(source).wrap_err(detail); - Error(detail, trace) - } -} -``` - -There are a few things addressed by the design above: - - We use the `eyre::Report` type as an _error tracer_ to trace - the error sources, together with additional information such as backtrace. - - Depending on the error source type, we want to have different strategies - to trace the error. - - For example, we may not care about the metadata - inside `tonic::TransportError`, so we just discard the data - after tracing it using `eyre`. - - We define _error constructor functions_ that handle the error source using - different strategies. The function constructs the `ErrorDetail` and - `eyre::Report` values, and then wrap them as the `Error` tuple. - -In general, when the error sources are defined by external libraries, -we have little control of how the types are defined, and need to have -different ways to handle them. -But when we have multiple error types that are defined in the same crate, -we want to have special way to handle the propagation of error. - -For example, consider the `LinkError` type, which has the error -we defined earlier as the error source: - -```rust -use crate::error::{Error as RelayerError, ErrorDetail as RelayerErrorDetail}; - -pub struct LinkError(LinkErrorDetail, eyre::Report); - -pub enum LinkErrorDetail { - Relayer { - source: RelayerErrorDetail - }, - ... -} - -impl LinkError { - pub fn relayer_error((source_detail, trace): RelayerError) -> LinkError { - let detail = LinkErrorDetail::Relayer(source_detail); - LinkError(detail, trace.wrap_err(detail)) - } -} -``` - -We propagate the error detail to LinkErrorDetail so that we can recover -additional detail later on. 
Furthermore, we extract the `eyre::Report` -from the error source and use it to add additional information -when we construct `LinkError`. - -### `flex-error` - -The proposed design has a lot of boilerplate required to properly define -the error types. To reduce boilerplate, we have developed -[`flex-error`](https://docs.rs/flex-error/) with the `define_error!` -macro which makes it straightforward to implement the error types -using a DSL syntax. With that, the error types can instead be defined as: - -```rust -use flex_error::{define_error, TraceError}; - -define_error! { - Error { - GrpcStatus - { status: GrpcStatus } - | e | { format!("GRPC call return error status {0}", e.status) }, - GrpcTransport - [ TraceError ] - | _ | { "error in underlying transport when making GRPC call" }, - ... - } -} -``` - -Aside from the syntactic sugar provided by the `define_error!` macro, `flex-error` -also allows error tracer implementation to be switched based on the Cargo feature -flags set on the `flex-error` crate. For example, we can switch from the -[`eyre`](https://docs.rs/eyre/) tracer to the [`anyhow`](https://docs.rs/anyhow/) -tracer by disabling `"flex-error/eyre_tracer"` and enabling `"flex-error/anyhow_tracer"` features. - -If all error tracer features and the `"flex-error/std"` feature are disabled, -a simple `flex_error::StringTracer` is used for tracing errors. The `StringTracer` -does not provide additional information such as a backtrace, but it is useful -for supporting `no_std`, where standard constructs such as `std::error::Error` and -error backtrace are not available. - -The full documentation for `flex-error` is available at [Docs.rs](https://docs.rs/flex-error/). - -## Status - -Accepted - The PR has been merged in [#988](https://github.com/informalsystems/ibc-rs/pull/988) - -## Consequences - -All error definitions in the `ibc-rs` project will be defined using the -`flex-error` crate. - -### Positive - -- Fine grained error handling. 
- -- Flexible error tracing. -- `no_std` support. - -### Negative - -- It takes time to learn about the DSL and how to manage different error sources. -- Compile errors arising inside the macros may be difficult to debug. -- IDE provides limited integration for code inside macros. - -### Neutral - -- The error variants are defined in the `ErrorDetail::ErrorVariant{...}` convention, - but the error constructor functions are defined in the `Error::error_variant(...)` - convention. - -## References - -- [PR #988](https://github.com/informalsystems/ibc-rs/pull/988): - Use flex-error to define errors -- [Issue #712](https://github.com/informalsystems/ibc-rs/issues/712): - Relayer error handling specification -- [Issue #1158](https://github.com/informalsystems/ibc-rs/issues/1158): - Tracking issue for no-std support diff --git a/docs/architecture/adr-008-ics20-implementation.md b/docs/architecture/adr-008-ics20-implementation.md deleted file mode 100644 index 3839a2315a..0000000000 --- a/docs/architecture/adr-008-ics20-implementation.md +++ /dev/null @@ -1,236 +0,0 @@ -# ADR 008: ICS20 Implementation Proposal - -## Status - -Accepted - -## Changelog - -* 21.04.2022: Draft Proposed - -## Context - -The goal of this ADR is to provide recommendations and a guide for implementing the ICS20 application. - -## Decision - -The proposal is broken down into traits that should be implemented by the ICS20 module. It also defines some primitives -that would help in building a module compliant with the ICS20 spec. - -#### Types - -The implementation must provide a base denom type that is serializable to string. Additionally, the following denom -types must also be provided: - -* `HashedDenom`: A denom type that can be serialized to a string of the form `'ibc/{Hash(trace_path/base_denom)}'`. -* `PrefixedDenom`: A denom type with a base denom which is prefixed with a trace. The trace itself consists - of `'{PortId}/{ChannelId}'` pairs and enables coin source tracing[^1]. 
- -```rust -/// Base denomination type -pub struct Denom(String); -``` - -A `Coin` defines a token with a denomination and an amount where the denomination may be any one of the denom types -described above. - -```rust -#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct Coin { - /// Denomination - pub denom: Denom, - /// Amount - pub amount: U256, -} -``` - -The ICS20 acknowledgement type and packet data type are defined in the spec[^2] and maybe modelled as follows. Note that -these types must be (de)serializable from/to JSON. - -```rust -pub enum ICS20Acknowledgement { - /// Equivalent to b"AQ==" - Success, - /// Error Acknowledgement - Error(String) -} - -pub struct FungibleTokenPacketData { - denomination: Denom, - amount: U256, - sender: String, - receiver: String, -} -``` - -#### Keepers and readers - -```rust -pub trait ICS20Keeper: ChannelKeeper -+ PortKeeper -+ BankKeeper::AccountId> -+ AccountKeeper::AccountId> -{ - /// The account identifier type. - type AccountId: Into; - - /// Set channel escrow address - fn set_channel_escrow_address(&mut self, port_id: &PortId, channel_id: &ChannelId) -> Result<(), ICS20Error>; - /// Sets a new {trace hash -> denom trace} pair to the store. - fn set_denom_trace(&mut self, denom_trace: DenomTrace) -> Result<(), Ics20Error>; -} - -pub trait ICS20Reader: ChannelReader -+ PortReader -+ AccountReader::AccountId> -+ BankReader::AccountId> -{ - /// The account identifier type. - type AccountId: Into + FromStr; - - /// Returns true iff sending is allowed in the module params - fn is_send_enabled(&self) -> bool; - /// Returns true iff receiving is allowed in the module params - fn is_receive_enabled(&self) -> bool; - /// get_transfer_account returns the ICS20 - transfers AccountId. 
- fn get_transfer_account(&self) -> AccountId; - /// Sets and returns the escrow account id for a port and channel combination - fn get_channel_escrow_address(&self, port_id: &PortId, channel_id: &ChannelId) -> Result; - /// Returns true iff the store contains a `DenomTrace` entry for the specified `HashedDenom`. - fn has_denom_trace(&self, hashed_denom: HashedDenom) -> bool; - /// Gets the denom trace associated with the specified hash in the store. - fn get_denom_trace(&self, denom_hash: HashedDenom) -> Option; -} - -pub trait BankKeeper { - /// The account identifier type. - type AccountId: Into; - - /// This function should enable sending ibc fungible tokens from one account to another - fn send_coins(&mut self, from: &Self::AccountId, to: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>; - /// This function to enable minting tokens(vouchers) in a module - fn mint_coins(&mut self, amt: Coin) -> Result<(), ICS20Error>; - /// This function should enable burning of minted tokens or vouchers - fn burn_coins(&mut self, module: &Self::AccountId, amt: Coin) -> Result<(), ICS20Error>; - /// This function should enable transfer of tokens from the ibc module to an account - fn send_coins_from_module_to_account( - &mut self, - module: Self::AccountId, - to: Self::AccountId, - amt: Coin, - ) -> Result<(), Ics20Error>; - /// This function should enable transfer of tokens from an account to the ibc module - fn send_coins_from_account_to_module( - &mut self, - from: Self::AccountId, - module: Self::AccountId, - amt: Coin, - ) -> Result<(), Ics20Error>; -} - -pub trait BankReader { - /// The account identifier type. - type AccountId: Into + FromStr; - - /// Returns true if the specified account is not allowed to receive funds and false otherwise. - fn is_blocked_account(&self, account: &Self::AccountId) -> bool; -} - -pub trait AccountReader { - /// The account identifier type. 
- type AccountId: Into + FromStr; - -    /// This function should return the account of the ibc module -    fn get_module_account(&self) -> Self::AccountId; -} - -pub trait Ics20Context: -Ics20Keeper::AccountId> -+ Ics20Reader::AccountId> -{ -    type AccountId: Into + FromStr; -} -``` - -## Handling ICS20 Packets - -ICS20 messages are still a subset of channel packets, so they should be handled as such. - -The following handlers are recommended to be implemented in the `ics20_fungible_token_transfer` application in the `ibc` -crate. These handlers will be executed in the module callbacks of any third-party IBC module that is implementing an -ICS20 application on-chain. - -```rust -/// Should be used in the transaction that initiates the ICS20 token transfer -/// Performs all logic related to token transfer and returns a SendTransferPacket type -/// for the calling module to create the actual packet and register it in the ibc module. -pub fn send_transfer(ctx: &Ctx, _msg: MsgTransfer) -> Result - where Ctx: ICS20Context -{ - if !ctx.is_send_enabled() { - return Err(ICS20Error::send_disabled()); - } - - // implementation details, see ICS 20 for reference -} - -/// Handles incoming packets with ICS20 data -/// To be called inside the on_recv_packet callback -pub fn on_recv_packet(ctx: &Ctx, _packet: &Packet, _data: &FungibleTokenPacketData) -> ICS20Acknowledgement - where Ctx: ICS20Context -{ - if !ctx.is_receive_enabled() { - return Err(ICS20Error::receive_disabled()); - } - - // implementation details, see ICS 20 for reference -} - -/// on_timeout_packet refunds the sender since the original packet sent was -/// never received and has been timed out. -/// To be called inside the on_timeout_packet callback -pub fn on_timeout_packet(ctx: &Ctx, data: &FungibleTokenPacketData) -> Result<(), ICS20Error> - where Ctx: ICS20Context -{ - refund_packet_token(ctx, data) -} - -/// Responds to the success or failure of a packet -/// acknowledgement written on the receiving chain. 
If the acknowledgement -/// was a success then nothing occurs. If the acknowledgement failed, then -/// the sender is refunded their tokens. -/// To be called inside the on_acknowledgement_packet callback -pub fn on_acknowledgement_packet(ctx: &Ctx, ack: ICS20Acknowledgement, data: &FungibleTokenPacketData) -> Result<(), ICS20Error> - where Ctx: ICS20Context -{ - match ack { - ICS20Acknowledgement::Success => Ok(()), - _ => refund_packet_token(ctx, data) - } -} - -/// Implements logic for refunding a sender on packet timeout or acknowledgement error -pub fn refund_packet_token(_ctx: &Ctx, _data: &FungibleTokenPacketData) -> Result<(), ICS20Error> - where Ctx: ICS20Context -{ - //... -} -``` - - -## Consequences - -### Positive - -- Provides more clarity on the details of implementing the ICS20 application in the `ibc` crate. -- Helps align closer with the ibc-go implementation[^3]. - -### Negative - -### Neutral - -## References - -[^1]: [ibc-go ADR 001: Coin Source Tracing](https://github.com/cosmos/ibc-go/blob/4271027a5ab1e6faaa2edbc2b9840209c315afab/docs/architecture/adr-001-coin-source-tracing.md) -[^2]: [ICS20 spec](https://github.com/cosmos/ibc/tree/master/spec/app/ics-020-fungible-token-transfer) -[^3]: [ibc-go's transfer module implementation](https://github.com/cosmos/ibc-go/tree/d31f92d9bf709f5550b75db5c70a3b44314d9781/modules/apps/transfer) diff --git a/docs/architecture/adr-009-chain-endpoint-handle-standardization.md b/docs/architecture/adr-009-chain-endpoint-handle-standardization.md deleted file mode 100644 index 9073842ea4..0000000000 --- a/docs/architecture/adr-009-chain-endpoint-handle-standardization.md +++ /dev/null @@ -1,51 +0,0 @@ -# ADR 009: ChainEndpoint and ChainHandle methods standardization - -## Status - -Accepted - The PR has been merged in [#2108](https://github.com/informalsystems/ibc-rs/pull/2108) - -## Changelog -* 2022-04-19: Initial Proposal - -## Context -There are a lot of common methods in the `ChainHandle` and `ChainEndpoint` 
traits, sometimes with minute differences between one another. This document provides a way to remove the duplication of methods for increased maintainability of the codebase, along with a few suggestions to standardize the method signatures. - -## Decision - -### Query methods parameters -There are currently discrepancies between how methods take their arguments. Some take a `request` object, and others take fine-grained arguments that will be used to build a `request` object in the implementation of the method. For example, `query_consensus_state()` takes arguments that will be used to build a request object, whereas `query_consensus_states()` takes a request object directly. -```rust -fn query_consensus_state( - &self, - client_id: ClientId, - consensus_height: Height, - query_height: Height, -) -> ...; - -fn query_consensus_states( - &self, - request: QueryConsensusStatesRequest, -) -> ...; -``` - -All methods will be refactored to take a request object as argument. - -### Query request objects -Currently, the type for the request objects is the "raw type", coming from the compiled protobuf files. For each such type, we will create a corresponding domain type, following a similar pattern as elsewhere in the codebase. - -This will allow us to modify the domain type as we wish, without requiring a change in the protobuf file (and thus, requiring a change in the communication protocol). A first such change of the domain type we foresee would alter the type to specify a height in queries; however this is out of scope for this particular ADR. 
- - -## Consequences - -### Positive -+ The protobuf types are not exposed directly, which allows `hermes` to work with future non-tendermint chains -+ Increased readability of the codebase; similar methods have a similar format - -### Negative - - -## References - -* [Option type should be used with non-zero Height #1009](https://github.com/informalsystems/ibc-rs/issues/1009) - + The new domain types proposed here, as well as the reduced deduplication of methods, will make fixing this issue easier diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md deleted file mode 100644 index 28a5ecfbbc..0000000000 --- a/docs/architecture/adr-template.md +++ /dev/null @@ -1,36 +0,0 @@ -# ADR {ADR-NUMBER}: {TITLE} - -## Changelog -* {date}: {changelog} - -## Context - -> This section contains all the context one needs to understand the current state, and why there is a problem. It should be as succinct as possible and introduce the high level idea behind the solution. -## Decision - -> This section explains all of the details of the proposed solution, including implementation details. -It should also describe affects / corollary items that may need to be changed as a part of this. -If the proposed change will be large, please also indicate a way to do the change to maximize ease of review. -(e.g. the optimal split of things to do between separate PR's) - -## Status - -> A decision may be "proposed" if it hasn't been agreed upon yet, or "accepted" once it is agreed upon. If a later ADR changes or reverses a decision, it may be marked as "deprecated" or "superseded" with a reference to its replacement. - -{Deprecated|Proposed|Accepted} - -## Consequences - -> This section describes the consequences, after applying the decision. All consequences should be summarized here, not just the "positive" ones. 
- -### Positive - -### Negative - -### Neutral - -## References - -> Are there any relevant PR comments, issues that led up to this, or articles referenced for why we made the given design choice? If so link them here! - -* {reference link} diff --git a/docs/architecture/architecture.md b/docs/architecture/architecture.md deleted file mode 100644 index ea549cfd8f..0000000000 --- a/docs/architecture/architecture.md +++ /dev/null @@ -1,130 +0,0 @@ -# Architecture - -This document describes the architecture of `ibc-rs`. If you're looking for a high-level overview of the code base, you've come to the right place! - -## Terms - -Some important terms and acronyms that are commonly used include: - - * **IBC**: Refers to the **I**nter**B**lockchain **C**ommunication protocol, a distributed protocol that allows different sovereign blockchains to communicate with one another. The protocol has both on-chain and off-chain components. - * **ICS**: Refers to **I**nter**C**hain **S**tandards, which are standardization documents that capture the specifications of the IBC protocol across multiple documents. For example, ICS02 captures the client abstraction of the IBC protocol. - * **IBC module**: Refers to a piece of on-chain logic on an IBC-enabled chain. - * **Relayer**: Refers to an off-chain process that is responsible for relaying packets between chains. - * **Hermes**: Refers to the `ibc-rs` crate's particular relayer implementation. - -## Bird's Eye View - -![][layout-image] - -At its highest level, `ibc-rs` implements the InterBlockchain Communication protocol which is captured in [specifications in a separate repository][ibc-specs]. `ibc-rs` exposes modules that implement the specified protocol logic. The IBC protocol can be understood as having two separate components: on-chain and off-chain logic. The relayer, which is the main off-chain component, is a standalone process, of which Hermes is an implementation. 
On-chain components can be thought of as modules or smart contracts that run as part of a chain. The main on-chain components deal with the abstractions of clients, connections, and channels. - -## Code Map - -This section talks briefly about the various directories and modules in `ibc-rs`. - -### `modules`/`ibc` - -> Note: While the name of the directory is `modules`, the name of the crate is `ibc`. - -This crate contains the main data structures and on-chain logic of the IBC protocol; the fundamental pieces. There is the conceptual notion of 'handlers', which are pieces of code that each handle a particular type of message. The most notable handlers are the [client][ibc-client], [connection][ibc-connection], and [channel][ibc-channel] handlers. - -> Note: The naming of directories in the `ibc` crate follow a slightly different convention compared to the other crates in `ibc-rs`. This is because this crate implements the [ICS standards][ics-standards]. Modules in the `ibc` crate that implement a piece of the ICS standard are prefixed with the standard's designation. For example, the `modules/src/ics02_client` implements [ICS 02][ics02], which specifies the Client abstraction. These prefixes may be removed in the future. - -#### Core - -Consists of the designs and logic pertaining to the transport, authentication, and ordering layers of the IBC protocol, the fundamental pieces. - -##### ICS 02 - Client - -Clients encapsulate all of the verification methods of another IBC-enabled chain in order to ensure that the other chain adheres to the IBC protocol and does not exhibit misbehaviour. Clients "track" the metadata of the other chain's blocks, and each chain has a client for every other chain that it communicates with. - -##### ICS 03 - Connection - -Connections associate a chain with another chain by connecting a client on the local chain with a client on the remote chain. 
This association is pair-wise unique and is established between two chains following a 4-step handshake process. - -##### ICS 04 - Channel - -Channels are an abstraction layer that facilitate communication between applications and the chains those applications are built upon. One important function that channels can fulfill is guaranteeing that data packets sent between an application and its chain are well-ordered. - -##### ICS 05 - Port - -The port standard specifies an allocation scheme by which modules can bind to uniquely-named ports allocated by the IBC handler in order to facilitate module-to-module traffic. These ports are used to open channels and can be transferred or released by the module which originally bound them. - -##### ICS 23 - Commitment - -Commitments (sometimes called _vector commitments_) define an efficient cryptographic construction to prove inclusion or non-inclusion of values at particular paths in state. This scheme provides a guarantee of a particular state transition that has occurred on one chain which can be verified on another chain. - -#### Applications - -Consists of various packet encoding and processing semantics which underpin the various types of transactions that users can perform on any IBC-compliant chain. - -##### ICS 20 - Fungible Token Transfer - -Specifies the packet data structure, state machine handling logic, and encoding details used for transferring fungible tokens between IBC chains. This process preserves asset fungibility and ownership while limiting the impact of Byzantine faults. - -#### Clients - -Consists of implementations of client verification algorithms (following the base client interface that is defined in `Core`) for specific types of chains. A chain uses these verification algorithms to verify the state of a remote chain. - -##### ICS 07 - Tendermint - -The Tendermint client implements a client verification algorithm for blockchains which use the Tendermint consensus algorithm. 
This enables state machines of various sorts replicated using the Tendermint consensus algorithm to interface with other replicated state machines or solo machines over IBC. - -#### Relayer - -Contains utilities for testing the `ibc` crate against the Hermes IBC relayer. It acts as scaffolding for gluing the `ibc` crate with Hermes for testing purposes. - -##### ICS 18 - Relayer - -Relayer algorithms are the "physical" connection layer of IBC — off-chain processes responsible for relaying data between two chains running the IBC protocol by scanning the state of each chain, constructing appropriate datagrams, and executing them on the opposite chain as allowed by the protocol. - -### `relayer` - -This crate provides the logic for relaying datagrams between chains. The process of relaying packets is an off-chain process that is kicked off by submitting transactions to read from or write to an IBC-enabled chain's state. More broadly, a relayer enables a chain to ascertain another chain's state by accessing its clients, connections, channels, or anything that is IBC-related. - -### `relayer-cli` - -A CLI wrapper around the `relayer` crate for running and issuing commands to a chain via a relayer. This crate exposes the Hermes binary. - -### `relayer-rest` - -An add-on to the CLI mainly for exposing some internal runtime details of Hermes for debugging and observability reasons. - -### `proto` - -Depends on the `proto-compiler` crate's generated proto files. - -Consists of protobuf-generated Rust types which are necessary for interacting with the Cosmos SDK. Also contains client and server methods that the relayer library includes for accessing the gRPC calls of a chain. - -### `proto-compiler` - -CLI tool to automate the compilation of proto buffers, which allows Hermes developers to go from a type specified in proto files to generate client gRPC code or server gRPC code. 
- -### `telemetry` - -Used by Hermes to gather telemetry data and expose it via a Prometheus endpoint. - -## Cross-Cutting Concerns - -### Testing - -Most of the components in the `ibc` crate (i.e. the `modules` directory) have basic unit testing coverage. These unit tests make use of mocked up chain components in order to ensure that message payloads are being sent and received as expected. - -We also run end-to-end tests to more thoroughly test IBC modules in a more heterogenous fashion. - -### Error Handling - -Most errors occur within the relayer as a result of either I/O operations or user misconfiguration. I/O-related errors can be sub-categorized into web socket errors and chain RPC errors. The latter occur when full nodes are out of sync with the rest of the network, which result in transactions that are based off of conflicting chain states. Such errors are usually either resolved by retrying the transaction, or might require operator intervention in order to flush the transaction from the mempool in conjunction with restarting the full node. - -The [flex-error][flex-error] library is the main tool used to handle errors in the code. This [demo][flex-error-demo] showcases some of the main patterns of how `flex-error` is used. For a more real-world example, [this][relayer-errors] file defines all of the possible errors for the relayer. 
- -[flex-error]: https://github.com/informalsystems/flex-error -[flex-error-demo]: https://github.com/informalsystems/flex-error/blob/master/flex-error-demo-full/src/main.rs -[ibc-specs]: https://github.com/cosmos/ibc#interchain-standards -[ics-standards]: https://github.com/cosmos/ibc#standardisation -[ibc-client]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics02_client -[ibc-connection]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics03_connection -[ibc-channel]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core/ics04_channel -[ics02]: https://github.com/cosmos/ibc/blob/master/spec/core/ics-002-client-semantics/README.md -[layout-image]: assets/ibc-rs-layout.png -[relayer-errors]: https://github.com/informalsystems/ibc-rs/blob/master/relayer/src/error.rs diff --git a/docs/architecture/assets/IBC_client_heights.jpeg b/docs/architecture/assets/IBC_client_heights.jpeg deleted file mode 100644 index b24e4d997d..0000000000 Binary files a/docs/architecture/assets/IBC_client_heights.jpeg and /dev/null differ diff --git a/docs/architecture/assets/IBC_conn_handshake_relay.jpeg b/docs/architecture/assets/IBC_conn_handshake_relay.jpeg deleted file mode 100644 index 8c6964c4c5..0000000000 Binary files a/docs/architecture/assets/IBC_conn_handshake_relay.jpeg and /dev/null differ diff --git a/docs/architecture/assets/IBC_relayer.jpeg b/docs/architecture/assets/IBC_relayer.jpeg deleted file mode 100644 index 9c470d3555..0000000000 Binary files a/docs/architecture/assets/IBC_relayer.jpeg and /dev/null differ diff --git a/docs/architecture/assets/IBC_relayer_threads.jpeg b/docs/architecture/assets/IBC_relayer_threads.jpeg deleted file mode 100644 index 543a29e5d6..0000000000 Binary files a/docs/architecture/assets/IBC_relayer_threads.jpeg and /dev/null differ diff --git a/docs/architecture/assets/ibc-rs-layout.png b/docs/architecture/assets/ibc-rs-layout.png deleted file mode 100644 index 
9e68bae5f8..0000000000 Binary files a/docs/architecture/assets/ibc-rs-layout.png and /dev/null differ diff --git a/docs/architecture/assets/relayer-v0-arch.jpg b/docs/architecture/assets/relayer-v0-arch.jpg deleted file mode 100644 index 3b574551e5..0000000000 Binary files a/docs/architecture/assets/relayer-v0-arch.jpg and /dev/null differ diff --git a/docs/architecture/assets/relayer-v0-link.jpeg b/docs/architecture/assets/relayer-v0-link.jpeg deleted file mode 100644 index 166f72307e..0000000000 Binary files a/docs/architecture/assets/relayer-v0-link.jpeg and /dev/null differ diff --git a/docs/disclosure-log.md b/docs/disclosure-log.md deleted file mode 100644 index 4a5df43072..0000000000 --- a/docs/disclosure-log.md +++ /dev/null @@ -1,358 +0,0 @@ -# Disclosure Log for IBC Protocols - -This document is a record of all the bugs or issues we uncovered while specifying & formally verifying the IBC protocols. - - -### 1. ICS3 liveness problem due to ICS018 relayer algorithm - -The algorithm for relaying connection handshake datagrams of type `ConnOpenTry`does not handle the situation when both chains are in state `INIT`. -The current relayer algorithm in [ICS018](https://github.com/cosmos/ibc/tree/19f519b2d6829e3096d6b9f79bffb7836033e79c/spec/relayer/ics-018-relayer-algorithms) specifies that the `ConnOpenTry` datagram should be relayed only if one of the chains is in state `INIT` and the other chain is uninitialized (see the snippet below); this is not enough for guaranteeing liveness of the connection handshake protocol (ICS04). - -``` - if (localEnd.state === INIT && remoteEnd === null) -``` - -The correct code should include both the cases when a single chain is in state `INIT`, as well as the case when both chains are in state `INIT`, as specified here: [Relayer.tla](https://github.com/informalsystems/ibc-rs/blob/e1b78946529e39a5c709ccd6d11637993073164e/docs/spec/relayer/Relayer.tla#L174) -This fix only concerns the relayer algorithm ICS018. 
- -##### Channel handshake (ICS4) liveness problem - -The same issue (and fix) seems to exist for the channel handshake datagrams. - - -### 2. ICS3 liveness problem due to `UpdateClient` semantics - -This problem is not specific to the connection handshake protocol (ICS3) itself, but is a bug in the way the relayers use the `UpdateClient` action. -We classify this under ICS3, however, since this is the context where we discovered the problem. -The TLA+ spec we have for depicting this liveness problem is for the ICS3 protocol. - -##### Problem statement - -Related issues: [#71](https://github.com/informalsystems/ibc-rs/issues/71) and [#61](https://github.com/informalsystems/ibc-rs/issues/61). -The problem is more thoroughly described in #61, but for the sake of completeness we restate it here in a compact form. - -The liveness property that a correct relayer should provide is eventual delivery. -Assuming some source chain `A`, destination chain `B`, and an IBC item `X` (e.g., connection, channel, or packet) on chain `A`, we can define this property as follows: - -> For any IBC item `X` on chain `A` destined for chain `B`, eventually, a correct relayer will submit item `X` to chain `B`. - -This is difficult to guarantee in practice, however. -Intuitively, the difficulty arises because of a combination of two factors: - -1. __Proof requirement:__ For chain `B` to accept item `X`, it must verify the authenticity of this item; this is done via the light client that chain `B` maintains. -Given an item `X` and a commitment proof for `X` constructed at height `h-1`, the light client requires the consensus state at height `h` that includes that commitment root required for verification. - -2. __Concurrency:__ Different relayers may update the same light client. -Suppose a relayer `r1` wants to submit a consensus state at height `h`. -In the meantime, however, another relayer `r2` may update this same light client to height `h'`. -Assume `h'` is bigger than `h`. 
-If the light client disallows updates with heights smaller than the current height `h'` then `r1`'s update fails. -Consequently, the relayer will be unable to submit consensus state at height `h`. - -To ensure eventual delivery, relayer `r1` would need to retry submitting item `X`, that is: resubmit the consensus state at a larger height (e.g., at `h'`) followed by the message that includes the proof for `X` (e.g., at `h'-1`). -This retry mechanism was adopted as a solution for the [current relayer implementation](https://github.com/informalsystems/ibc-rs/blob/master/docs/architecture/adr-002-ibc-relayer.md#ibc-client-consensus-state-vs-relayer-light-client-states-vs-chain-states). -Note that it is also possible for relayer `r2` to have submitted the same item `X` successfully; in this case, the liveness problem does not actually surface. - - -##### TLA+ trace - -> Note that the TLA+ spec below may change in time. Here we refer to the spec as [existing at this commit](https://github.com/informalsystems/ibc-rs/tree/788c36be9e14725c542bd586b4fe4593edb3ca80/docs/spec/connection-handshake/L2-tla) (unchanged up to [release 0.0.2](https://github.com/informalsystems/ibc-rs/releases/tag/v0.0.2)). - -To obtain an execution in TLA+ that depicts the above liveness problem, it is sufficient to enable the `Concurrency` flag in the L2 default TLA+ spec for ICS3. -This spec is located in [spec/connection-handshake/L2-tla/](spec/connection-handshake/L2-tla/). -In this spec we make a few simplifications compared to the real system, most importantly: to verify an item at height `h`, a light client can use the consensus state at the same height `h` (no need for smaller height `h-1`). -Below we summarize the parameters as well as the sequence of actions that lead to the liveness problem. 
- -###### Parameters: - -- `MaxBufLen <- 2` -- `MaxHeight <- 8` -- `Concurrency <- TRUE` -- Behavior spec: Temporal formula `Spec` -- Check for `Deadlock`, Invariants `TypeInvariant` and `ConsistencyProperty`, as well as Property `Termination` - -###### Trace: - -Both chains `A` and `B` start at height `1`, and the light client on each chain has consensus state for height `1`. - -1. The environment submits a `ICS3MsgInit` message to chain `A`. - -2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares a `ICS3MsgTry` message destined for chain `B`. -The proof in this message is for height `2`. - -3. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `1` to height `2`. - -4. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `2` to height `3`. - -5. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `3` to height `4`. - -6. __Concurrency:__ The environment triggers the `UpdateClient` action on chain `B`: the light client on this chain is updated with height `4` (that is, the latest height of chain `A`), and chain `B` also transitions from height `2` to height `3`. - -7. The environment passes (i.e., relays) the `ICS3MsgTry` message to chain `B`. -Recall that this message has proofs for height `2`; consenquently, the environment also attempts to trigger `UpdateClient` action on chain `B` for consensus state at height `2`. -This action does not enable because the light client on `B` has a more recent consensus state for height `4`. - -8. Chain `B` attempts to process the `ICS3MsgTry` but is unable to verify its authenticity, since the light client on this chain does not have the required consensus state at height `2`. -Chain `B` drops this message. - -From this point on, the model stutters, i.e., is unable to progress further in the connection handshake protocol. - - -### 3. 
ICS3 problems due to version negotiation - -__Context__. -The original issue triggering this discussion is here: [cosmos/ics/#459](https://github.com/cosmos/ibc/issues/459). -Briefly, version negotiation in the ICS3 handshake can interfere in various ways, breaking either the safety or liveness of this protocol. -Several solution candidates exist, which we classify by their "mode", i.e., a strategy for picking the version at some point or another in the protocol. -For a full description of the modes, please consult [L2-tla/readme.md#version-negotiation-modes](spec/connection-handshake/L2-tla/README.md#version-negotiation-modes). - -__Overview__. -Below we use TLA+ traces to explore and report on the exact problems that can occur. We also show how the solution candidates fare. -The table below summarizes our results for the four cases we consider: - -| Case | Property violation | -|---------|----------------------------| -| (a) Empty version intersection | liveness| -| (b) Mode `overwrite` | safety| -| (c) Mode `onTryNonDet` | liveness| -| (d) Mode `onAckNonDet` | safety| - - -These are the main takeaways from this discussion: - -1. The set of compatible versions that chains start off with (return values of `getCompatibleVersions()` in ICS3) have to intersect, otherwise a liveness issue occurs. This assumption is independent of the version negotiation mode. We report this in __case (a)__ below. -2. Modes "overwrite", "onTryNonDet", and "onAckNonDet" all result in breaking the handshake protocol. See __cases (b), (c), and (d)__ below for traces. -3. The deterministic modes "onTryDet" and "onAckDet" pass model checking, so a solution should be chosen among these two candidates (see the [original issue](https://github.com/cosmos/ibc/issues/459) for follow-up on the solution). - -##### Case (a). 
Empty version intersection causes liveness issue

Model checking details in TLA+:
- Model parameters:
```
Concurrency <- FALSE
MaxBufLen <- 2
MaxHeight <- 7
MaxVersionNr <- 2
VersionPickMode <- "overwrite"
```
- Check for _Deadlock_ and property _Termination_.

Outcome:
- Model checking halts with exception "Temporal properties were violated."

###### Trace

The two chains start off with different versions (`1` for A, and `2` for B).
So the __compatible version__ sets on these chains do not intersect.

1. The environment submits a `ICS3MsgInit` message to chain `A`.

2. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `1` to height `2`.

3. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `2` to height `3`.

4. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `1` to height `2`.

5. Chain `A` processes the `ICS3MsgInit`, advances to height `3`, and prepares a `ICS3MsgTry` for chain `B`.
The version in this message is `<<1>>`, the same as the version field that chain `A` started with.

6. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
This message has proofs for height `3` so chain `B` gets updated with consensus state for height `4`.
With this update, chain `B` also advances to height `4`.

7. Chain `B` drops the `ICS3MsgTry` message because the version field does not match any of the compatible versions of this chain.
Therefore, the model cannot progress.

###### Fix

To fix this issue, the model requires an explicit assumption that the compatible versions on the two chains must have a non-empty intersection. 
-
We capture this assumption in the `Init` action, via the `ChainVersionsOverlap` predicate:

```tla
Init ==
    /\ chmA!Init
    /\ chmB!Init
    /\ ChainVersionsOverlap(storeChainA, storeChainB)
    /\ InitEnv
```

Once we add the `ChainVersionsOverlap` assumption, this model no longer has liveness issues.
The "overwrite" mode can lead to safety problems, however, which we document below.

##### Case (b). Mode `overwrite` causes safety issue

Model checking details in TLA+:
- Model parameters:
```
Concurrency <- FALSE
MaxBufLen <- 2
MaxHeight <- 7
MaxVersionNr <- 2
VersionPickMode <- "overwrite"
```
- Check for invariant _VersionInvariant_, as well as _Deadlock_ and property _Termination_.
- Make sure the `Init` action includes the `ChainVersionsOverlap` predicate.

Outcome:
- Model checking halts with exception "Invariant VersionInvariant is violated."

###### Trace

Both chains `A` and `B` start with the compatible versions `<<1, 2>>`.

1. The environment submits a `ICS3MsgInit` message to both chains.

2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares a `ICS3MsgTry` for chain `B`.
The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.
The connection on this chain goes from state `UNINIT` to state `INIT`.

3. Chain `B` processes the `ICS3MsgInit`, advances to height `2`, and prepares a `ICS3MsgTry` for chain `A`.
The versions in this message are `<<1, 2>>`, the same as the version field in chain `B`.
The connection on this chain goes from state `UNINIT` to state `INIT`.

4. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `2`.
With this update, chain `B` advances to height `3`.

5. The environment relays the `ICS3MsgTry` message to the input buffer of chain `A`. 
-This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `2`. -With this update, chain `A` advances to height `3`. - -6. Chain `A` processes the `ICS3MsgTry` message, advances to height `4`, and prepares a `ICS3MsgAck` message for `B`. -The version in this message is `1`. -The connection in this chain goes into state `TRYOPEN`, with version chosen to be `1`. - -7. Chain `B` processes the `ICS3MsgTry` message, advances to height `4`, and prepares a `ICS3MsgAck` message for `A`. -The version in this message is `2`. -The connection in this chain goes into state `TRYOPEN`, with version chosen to be `2`. - -8. The environment relays the `ICS3MsgAck` message to the input buffer of chain `B`. -This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `4`. -With this update, chain `B` advances to height `5`. - -9. Chain `B` processes the `ICS3MsgAck` message (which had version `1` -- see step 6 above), advances to height `6`, and prepares a `ICS3MsgConfirm` message for `A`. -Chain `B` overwrites its local version (namely, `2`) with the version in the `ICS3MsgAck` message (that is, `1`). -The connection in this chain goes into state `OPEN`, with version chosen to be `1`. -The `ICS3MsgConfirm` that chain `B` creates contains version `1`. - -10. The environment relays the `ICS3MsgAck` message to the input buffer of chain `A`. -This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `4`. -With this update, chain `A` also advances to height `5`. - -11. Chain `A` processes the `ICS3MsgAck` message; recall that the version in this message is `2` (see step 7 above). -Upon processing this message, chain `A` overwrites its local version (which was `1`) with the version in the `ICS3MsgAck` message (concretely, `2`). 
-
The connection in this chain goes into state `OPEN`, with version chosen to be `2`.
Chain `A` also advances to height `6` and prepares a `ICS3MsgConfirm` message for `B`; the `ICS3MsgConfirm` contains version `2`.

At this point, the connection is `OPEN` at both chains, but the version numbers do not match.
Hence, the invariant `VersionInvariant` is violated.


##### Case (c). Mode `onTryNonDet` causes liveness issue

Setup:
- Model parameters:
```
Concurrency <- FALSE
MaxBufLen <- 2
MaxHeight <- 7
MaxVersionNr <- 2
VersionPickMode <- "onTryNonDet"
```
- Check for _Deadlock_ and property _Termination_.

Outcome:
- Model checking halts with exception "Temporal properties were violated."
- The issue is that the version in the two chains diverges and can never reconcile.

###### Trace

Both chains `A` and `B` start with the version field `<<1, 2>>`, that is, with two compatible versions.

1. The environment submits a `ICS3MsgInit` message to both chains.

2. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `1` to height `2`.

3. The environment triggers the `AdvanceChainHeight` action of chain `A`, so this chain transitions from height `2` to height `3`.

4. Chain `B` processes the `ICS3MsgInit`, advances to height `2`, and prepares a `ICS3MsgTry` message destined for chain `A`.
The versions in this message are `<<1, 2>>`, the same as the version field in chain `B`.

5. The environment triggers the `AdvanceChainHeight` action of chain `B`, so this chain transitions from height `2` to height `3`.

6. Chain `A` processes the `ICS3MsgInit`, advances to height `4`, and prepares a `ICS3MsgTry` message destined for chain `B`.
The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.

7. The environment passes (i.e., relays) the `ICS3MsgTry` message to the input buffer of chain `B`. 
-
This message has proofs for height `4`; consequently, the environment also triggers `UpdateClient` on chain `B` for consensus state at height `4`, preparing this chain to process the message in the input buffer.
With this update, chain `B` advances to height `4`.

8. The environment passes (i.e., relays) the `ICS3MsgTry` message to the input buffer of chain `A`.
This message has proofs for height `2`, so the environment also does an `UpdateClient` on chain `A` for consensus state at height `2`.
With this update, chain `A` advances to height `5`.

9. Chain `A` processes the `ICS3MsgTry`, advances to height `6`, and prepares a `ICS3MsgAck` for chain `B`.
The version in this message is `<<2>>`, which is the version which chain `A` chose non-deterministically for this connection.
The connection on chain `A` is now in state `TRYOPEN`.

10. Chain `B` processes the `ICS3MsgTry`, advances to height `5`, and prepares a `ICS3MsgAck` for chain `A`.
The version in this message is `<<1>>`, which chain `B` chose non-deterministically for this connection.
The connection on chain `B` is now in state `TRYOPEN`.

From this point on, the two chains cannot make further progress in the handshake, since they chose different versions.
Neither of the two chains can process the `ICS3MsgAck` message because the version in this message does not match with the version the chain stores locally.
(A chain should not overwrite its local version either, otherwise the safety issue from case (b) can appear.)
Therefore, the model stutters (cannot progress anymore).

##### Case (d). Mode `onAckNonDet` causes safety issue

Model checking details in TLA+:
- Model parameters:
```
Concurrency <- FALSE
MaxBufLen <- 2
MaxHeight <- 7
MaxVersionNr <- 2
VersionPickMode <- "onAckNonDet"
```
- Check for invariant _VersionInvariant_, as well as _Deadlock_ and property _Termination_.

Outcome:
- Model checking halts with exception "Invariant VersionInvariant is violated." 
-

###### Trace

Both chains `A` and `B` start with the compatible versions `<<1, 2>>`.

1. The environment submits a `ICS3MsgInit` message to chain `A`.

2. Chain `A` processes the `ICS3MsgInit`, advances to height `2`, and prepares a `ICS3MsgTry` for chain `B`.
The versions in this message are `<<1, 2>>`, the same as the version field in chain `A`.
The connection on this chain goes from state `UNINIT` to state `INIT`.

3. The environment relays the `ICS3MsgTry` message to the input buffer of chain `B`.
This message has proofs for height `2`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `2`.
With this update, chain `B` advances to height `2`.

4. Chain `B` processes the `ICS3MsgTry` message, advances to height `3`, and prepares a `ICS3MsgAck` message for `A`.
The version in this message is `<<1, 2>>`.
The connection in this chain goes into state `TRYOPEN`; chain `B` does not choose a specific version yet, so the connection on `B` still has versions `<<1, 2>>`.

5. The environment relays the `ICS3MsgAck` message to the input buffer of chain `A`.
This message has proofs for height `3`, so the environment triggers `UpdateClient` on chain `A` for consensus state at height `3`.
With this update, chain `A` advances to height `3`.

6. Chain `A` processes the `ICS3MsgAck` message (which has versions `<<1, 2>>`), advances to height `4` and prepares a `ICS3MsgConfirm` message for `B`.
Chain `A` locks on version `1` (non-deterministic choice between `<<1, 2>>`), which it also reports in the `ICS3MsgConfirm` message.
The connection in this chain goes into state `OPEN`, with version chosen to be `1`.

7. The environment relays the `ICS3MsgConfirm` message to the input buffer of chain `B`.
This message has proofs for height `4`, so the environment triggers `UpdateClient` on chain `B` for consensus state at height `4`.
With this update, chain `B` also advances to height `4`.

8. 
Chain `B` processes the `ICS3MsgConfirm` message (which contains version `1`). -Chain `B` locks on version `2` (non-deterministic choice between its local versions `<<1, 2>>`). -The connection in this chain goes into state `OPEN`. - -At this point, the connection is `OPEN` at both chains, but the version numbers do not match. -Hence, the invariant `VersionInvariant` is violated. diff --git a/docs/spec/README.md b/docs/spec/README.md deleted file mode 100644 index 6adf6bb24d..0000000000 --- a/docs/spec/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Specification and verification of IBC protocols - - * [tla](./tla) comprises TLA+ specification for the IBC specification. - - * [connection-handshake](./connection-handshake) [Deprecated] contains English and TLA+ specifications for the IBC Connection Handshake Protocol (ICS 003). - - * [relayer](./relayer) contains English specification of the relayer. diff --git a/docs/spec/connection-handshake/CHANGELOG.md b/docs/spec/connection-handshake/CHANGELOG.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/spec/connection-handshake/L1_2.md b/docs/spec/connection-handshake/L1_2.md deleted file mode 100644 index 62bd5bcb75..0000000000 --- a/docs/spec/connection-handshake/L1_2.md +++ /dev/null @@ -1,628 +0,0 @@ -# IBC Connection Handshake (ICS3) English Spec - -## L1. Problem statement / outside view - -Suppose there are two chains (e.g., distributed ledgers) that execute independently of each other. -What should be an abstraction for representing a path of communication between the two? -We use a __connection__ abstraction to represent this path of communication (see the [ICS 003 spec](#references)). - -The IBC connection handshake protocol, at the highest level of abstraction, enables two chains to establish -- i.e., __open__ -- a connection with each other. -Once open, connections cannot be closed, so a closing handshake does not exist. 
-Whenever we speak of connection handshake, we implicitly mean the __opening__ handshake. -Throughout the rest of this document, we will often use the abbreviation "ICS3" to stand for __connection handshake__ problem (or protocol, depending on the context). - -A connection involves the two __parties__ (the respective chains) involved in the communication, as well as a __relayer__ which handles message transmission between the chains. -The relayer carries a central responsibility in handshaking and maintaining a connection. -That is, a connection handshake is essentially a three-party protocol: two chains, plus a relayer. -This document focuses on the functionality of the two chains, while providing a minimal description of the relayer. - -#### Sequential problem statement - -###### Definitions -A chain comprises three elements that are relevant for connections: - -- a __module__, or the "connection handshake module" or "ICS3 module": this is a process (running as a component of the chain) which implements the ICS3 protocol; -- the __store__ of the chain, alternatively called the "consensus state" or just "state" of the chain; -- a __client__: a process running on the chain, which represents a window onto the store of the other chain. - -We give concrete names, Alice and Bob, to the chains involved in our problem, to simplify description. -The diagram below sketches the system model of the connection handshake problem. -Note that the _client_ on Alice points to the _store_ on Bob; vice versa is true for the _client_ on Bob (but there is no corresponding arrow to keep the figure cleaner). -Alice and Bob have no direct medium for communicating with each other, so a relayer process sits between the two chains and enables their communication (more details on this will follow). 
- - - -``` - IBC Connection Handshake - High-level Model - Alice Bob - +-----------+ +-----------+ - | Chain | +---------+ | Chain | - | |<--->| Relayer |<--->| | - | +-------+ | +---------+ | +-------+ | - | |Module | | | |Module | | - | +-------+ | | +-------+ | - | +-------+ | | +-------+ | - | | Store | | -->| Store | | - | +-------+ | -------/ | +-------+ | - |+---------+| --------/ |+---------+| - || Client |---/ || Client || - |+---------+| |+---------+| - +-----------+ +-----------+ -``` - -###### Problem statement and guarantees -A connection handshake allows, briefly, that the modules on two chains agree on a consistent view of their state (i.e., their store), and each module allocates a new connection associated with this state. - -We identify two basic (high-level) requirements that a connection handshake should guarantee: - -- **[ICS3-Seq-1-Agreement]** If Alice finishes a handshake by opening a new connection `c1` with Bob, and Bob finishes the handshake opening the connection `c2`, then `c1 == c2`. -In other words, if a connection handshake finishes, the two parties have a consistent view of this connection. We say that Alice stores one end of the connection, label `c1`, and Bob stores the other end of the same connection, label `c2`. - -- **[ICS3-Seq-2-Termination]** Eventually, the modules of Alice and Bob should both finish the handshake, opening a connection. -Opening a connection means that Alice and Bob both allocate in their local store a new connection. - -Requirement **[ICS3-Seq-1-Agreement]** represents the basic safety guarantee, while **[ICS3-Seq-2-Termination]** is the basic liveness guarantee we would like to have. - - -## L2. Protocol specification / protocol view - -### 2.1 System model specification - -#### System model - -A **chain** is a deterministic state machine. -Each chain may be replicated, i.e., consisting of a replicated state machine (RSM), or it may be a standalone process. 
-
As a side note, there are certain requirements a chain must satisfy in practice (e.g., see [[ICS 024](#references)]); at this level of specification, however, we model each chain as consisting of those three components we described earlier: module, store, and a client.

In ICS3, __actors__ represent entities that may trigger this protocol and provide external feedback to the protocol.
To quote from IBC terminology:

> An actor, or a user (used interchangeably), is an entity interacting with the IBC protocol. An actor can be a human end-user, a module or smart contract running on a blockchain, or an off-chain relayer process capable of signing transactions. [[IBC terminology](#references)]

###### Communication assumptions

A relayer intermediates communication between Alice and Bob.
We model communication as a shared mutable state.
Each chain exposes an API comprising read, write, as well as a queue (FIFO) functionality.
So there are two parts to the communication API:

 1. a read/write store:
    - this holds the entire state of the chain;
    - each module can write to this store, and we are primarily concerned with the function `setConnection` that writes a new connection into the store;
    - external processes -- in our case, any relayer -- can read from the store via a function `getConnection`;

 2. a queue of datagrams, or messages:
    - each module can `pop` datagrams stored in this queue;
    - external processes (relayers) can `push` to this queue.

We describe the API in the code snippet below.
Connection handshake modules implement this interface.
Note that we use the modifiers 'private' and 'public' to denote which API function can be invoked by external processes (e.g., relayer) and which functions are private to the module implementing this interface.
The data types (`Identifier` and `ConnectionEnd`) are defined below. 
- -```golang -type CommunicationLayer interface { - // Write a new value (a connection) to the local store, indexed by the - // identifier of the connection on the local party. - private setConnection(i Identifier, e ConnectionEnd) - - // Relayer invokes this to `read` from a chains' local store. - public getConnection(i Identifier) -> ConnectionEnd - - // Relayer invokes this to append a datagram into a chain's - // local store. - public push("outstandingDatagrams", g Datagram) - - // A module consumes elements from its local store - // "outstandingDatagrams". - private pop("outstandingDatagrams") -> Datagram - // The typical use case is that for each datagram, this module invokes the - // corresponding handler, e.g., ConnTryHandler. -} -``` - -Preconditions: - - the datagram `g` as well as Identifier `i` and Connection `e` (respectively for `push`, `setConnection` and `getConnection`) are non-null; - -Postconditions: - - for `setConnection`: the local store on the module implementing this function stores at position `i` the value `e`; - - for `push`: the `outstandingDatagrams` queue will append the Datagram `g` - -Error conditions: - - `getConnection` may fail (returning `null`) if no `ConnectionEnd` exists for the input parameter `i`; - - before invoking the function `setConnection`, the caller should make sure that the client in the `Identifier` field was created; - - -Properties of the communication layer: - -- **[ICS3-CommAssumption-1-RWIntegrity]** If some connection `e` is returned from `getConnection(i)` then the `setConnection(i, e)` was previously invoked. - -- **[ICS3-CommAssumption-2-QueueIntegrity]** If some module `d` returns a datagram `g` from `Pop("outstandingDatagrams")`, then a process previously invoked `Push("outstandingDatagrams", g)`. - -- **[ICS3-CommAssumption-3-QueueNoLoss]** If some process invokes `Push("outstandingDatagrams", g)`, then eventually a `Pop` operation will return `g`. 
- - -Refinement remarks (relation to L3): - - The `push` function is a more abstract version of `submitDatagram` ([ICS 018](#references)). - - The `setConnection` function is a more abstract version of the functionality accomplished by `provableStore.set` and `addConnectionToClient` ([ICS 003](#references)). - - The `getConnection` function is the analogy to `getConnection` ([ICS 018](#references)). - - -###### Fault assumptions -The chains Alice and Bob (and their module, local store, and client, respectively) may be subject to arbitrary (Byzantine) faults. -The properties we define below are guaranteed to hold iff both Alice and Bob behave correctly, i.e., do not deviate from any protocol we describe below. -Beside these two parties, the system comprises an arbitrary number of other parties, any of which may be Byzantine; these other chains may, for instance, try to impersonate Alice or Bob or attack them in some way. -The termination property of the ICS3 protocol depends on the existence of at least one correct relayer. -Additionally, there may be an arbitrary number of relayers that can communicate (i.e., [read/push](#communication-assumptions)) to Alice and Bob and are subject to Byzantine faults. - - -###### Additional remarks on the system model -We explicitly overlook here two assumptions of synchrony that may be necessary for achieving liveness of **other** protocols, but they are not necessary within the ICS3 protocol. -These assumptions are: (i) partial synchrony is necessary for achieving liveness of a chain if that chain is implemented using a deterministic consensus protocol (e.g., Tendermint Core); (ii) synchrony is necessary for guaranteeing message delivery in any connection-oriented protocol such as TCP, which is most likely the underlying network transport layer (implementing the `CommunicationLayer` between modules and relayers). 
-

#### Properties

###### Preliminary abstractions
We introduce several abstractions that are relevant at this point:

1. A `ConnectionParty` is a data type encapsulating the details that identify a party.

```golang
type ConnectionParty interface {
    connectionIdentifier Identifier
    clientIdentifier Identifier
    prefix Prefix
}
```

2. A `ConnectionState` captures the state of a connection, and may have one of the following values.

```typescript
enum ConnectionState {
  UNINIT,
  INIT,
  TRYOPEN,
  OPEN,
}
```

3. A `ConnectionEnd` is a data type that captures all the details of a connection at a party.
This includes a _remote_ and a _local_ field, so that the local party is the one storing this object, and the remote party is the other one.
One of the goals of the connection handshake protocol is to allocate an object of this type at each of Alice and Bob.

```golang
type ConnectionEnd interface {
    state ConnectionState
    remoteParty ConnectionParty
    localParty ConnectionParty
    version []String
}
```


4. The `ConnProof` and `ClientProof` are two data types that abstract over the details of a cryptographic proof that any module can create, and another module can verify.
Intuitively, a proof is helpful for some module to guarantee that it is in a certain state.
We are concerned with connection proofs (`ConnProof` type) and client proofs (`ClientProof`) here.

```golang
type ConnProof struct {
    proof CommitmentProof // The proof for the connection that the remote party claims to have.
}
```

```golang
type ClientProof struct {
    proof CommitmentProof // The proof for the client that the remote party claims to have.
    height uint64 // The height which the client (on remote party) claims having. 
-
}
```

We now restate the connection handshake problem in a slightly more precise (or lower level) formulation:

__A connection handshake enables two ICS3 modules to agree on a consistent view of their chain state, to verify each other's state, and to allocate a new connection.__


###### Guarantees
We refine the safety guarantee **[ICS3-Seq-1-Agreement]** (defined in the [sequential problem statement](#sequential-problem-statement)) by splitting this into three complementary safety properties:

- **[ICS3-Proto-1-ConnectionUniqueness]**
A module accepts (i.e., initializes on) a `ConnectionEnd` `e` at most once.

- **[ICS3-Proto-2-ConnectionIntegrity]**
If any two modules open a connection `e`, then either one module or the other or both modules accepted (i.e., initialized with) `e`.

- **[ICS3-Proto-3-StateConsistency]**
If any two modules open a connection, then the client in the first module is consistent with the state of the second module.

The liveness property below is equivalent to **[ICS3-Seq-2-Termination]** (the liveness property in the sequential specification above):

- **[ICS3-Proto-4-Termination]**
If the two parties involved in a connection handshake are correct, then the connection handshake eventually terminates.
Specifically, termination implies that each module allocates in the local store a new `ConnectionEnd` object with `state` field set to `OPEN`.


_Remarks_:

- Uniqueness property essentially provides a safeguard against overwriting a connection in the store with some new set of parameters.

- The integrity property, in conjunction with uniqueness, ensures that there is continuity between the connections that a module initializes and the connections that this module opens.

- One of the mechanisms that enforces the connection integrity property consists of cryptographic proofs, specifically the `ConnProof` type, which asserts that a certain chain stores a certain connection in a certain state. 
- -- The `ClientProof` type guarantees the state consistency property, by asserting that the client on a chain is updated to a consensus state of the remote chain. - -- Note that the consistency property works both ways. -This property applies to _any two modules_ (there is no predefined "first" or "second" module here). - - -### 2.2 Protocol - -The ICS3 protocol comprises four steps, summarized below. - -1. An actor invokes the `ConnInitHandler` handler at the ICS3 module in one of the chains; this sets off the connection handshake protocol. -In our example, we will use Alice as the party to execute `ConnInitHandler`. -Once Alice does so, this handler marks the connection on her end as initialized (i.e., `INIT`). - -2. This comprises two sub-steps: - - 2.a. Upon observing that Alice has executed `ConnInitHandler` (meaning that her state contains a connection that is `INIT`), the relayer constructs a datagram of type `ConnOpenTry` and pushes this at Bob's module; - - 2.b. Bob handles this datagram via the `ConnTryHandler` handler. Once this handler finishes, the connection on his end is marked as `TRYOPEN`. - -3. This comprises two sub-steps: - - 3.a. Upon observing that Bob has finished executing `ConnTryHandler` (i.e., his state contains a `TRYOPEN` connection), the relayer constructs a datagram of type `ConnOpenAck` and pushes this at Alice's module; - - 3.b. Alice handles this datagram by invoking the `ConnAckHandler` handler. Once this handler finishes, Alice considers her `ConnectionEnd` in state `OPEN`. - -4. This comprises two sub-steps: - - 4.a. When the relayer observes that Alice finished handling the `ConnOpenAck` datagram (and consequently the connection is open on her side), the relayer constructs a `ConnOpenConfirm` datagram and pushes it on Bob's side. - - 4.b. 
Finally, Bob's module processes the `ConnOpenConfirm` datagram through an eponymous handler, which sets the connection state to `OPEN` for him and thereby marks the termination of the ICS3 protocol. - -The diagram below sketches these fours steps of handshake protocol. - -``` - The four steps comprising - IBC Connection Handshake Protocol - Alice Bob - +-----------+ +-----------+ - |ICS3 Module| |ICS3 Module| - | | | | - | step 1 | | - | ConnInitHandler | | - | ►INIT | | | - | |<-2.a--Relayer-----2.a-->| | - | | | | | - | | \-> step 2.b | - | | ConnTryHandler | - | | | ►TRYOPEN | - | <--3.a------Relayer--3.a->| | - | | | | | - | step 3.b <---/ | | - | ConnAckHandler | | - | ►OPEN | | | - | |<-4.a---Relayer----4.a-->| | - | | | | | - | | \-> step 4.b | - | | ConnConfirmHandler | - | | | ►OPEN | - +-----------+ +-----------+ -``` - - -#### Protocol handler signatures - -We first present the signatures of the four protocol handlers; the connection handshake module at each party implements and exposes these handlers. - -```golang -func ConnInitHandler( - local ConnectionParty, - remote ConnectionParty) - -func ConnTryHandler( - local ConnectionParty, - remote ConnectionParty, - remoteVersions []String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) - -func ConnAckHandler( - local ConnectionParty, - remoteVersion String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) - -func ConnConfirmHandler( - local ConnectionParty, - proofsHeight uint64, - remoteConnectionProof ConnProof) -``` - - - -#### Main Protocol Handlers - -We first define the four protocol handlers, and then describe the datagrams. -Finally, we discuss some helper functions. - -###### ConnInitHandler - -This is step 1. - -```golang -func ConnInitHandler(local ConnectionParty, remote ConnectionParty) -{ - // Create local end of the connection in the state 'INIT'. 
- connectionEnd = newConnectionEnd('INIT', local, remote, getCompatibleVersions()) - - // Uniqueness check. - // The Init handler for a given connection may run only once. - // Abort, unless this is the first time initializing this connection. - abortTransactionUnless(getConnection(local.connectionIdentifier) == nil) - - // Now save this connection end in the local store. - // This connectionEnd is in state INIT, the relayer will continue with - // next steps of the connection handshake protocol. - setConnection(local.connectionIdentifier, connectionEnd) -} -``` - -Preconditions: - - The parameters `local` and `remote` should be valid, i.e., a syntactically correct (see [validation](#Validation)). - - This handler must not have executed previously with the same `local` input parameter (in particular, the same `local.connectionIdentifier` field). - -Postconditions: - - The module executing this handler stores a `ConnectionEnd` (matching with the input arguments `local` and `remote`) in state `INIT`. - - Uniqueness is satisfied because this handler aborts if a key with the identifier `local.connectionIdentifier` already exists in the store (at-most-once semantics). - - To ensure termination of the ICS3 protocol, this handler must execute either on one of the chains or both (at-least-once semantics). - - Handler produces no output; may abort if preconditions not met. - - -###### ConnTryHandler - -This is step 2.b. - -```golang -func ConnTryHandler( - local ConnectionParty, - remote ConnectionParty, - remoteVersions []String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) -{ - // Create local end of the connection. - connectionEnd = newConnectionEnd('TRYOPEN', local, remote, pickVersion(remoteVersions)) - - // Uniqueness check. - // Verify that if there is a connection then the Init handler previously executed - // with these same parameters). 
- current = getConnection(local.connectionIdentifier) - abortTransactionUnless( - current == nil || - current.state == 'INIT' && matchingEnds(connectionEnd, current)) - - // Verify proofs. - expectedRemoteConnection = newConnectionEnd('INIT', remote, local, remoteVersions) - abortTransactionUnless(verifyProofs( - expectedRemoteConnection, proofsHeight, remoteConnectionProof, remoteClientProof) - - // Handler done, store the updated connectionEnd. - setConnection(local.connectionIdentifier, connectionEnd) -} -``` - -Preconditions: - - The input parameters should be valid; among others validation criterias, it is important that the module running this handler supports at least one of the versions supplied in the input list `remoteVersions` (see [validation](#Validation)). - - The two proofs `remoteConnectionProof` and `remoteClientProof` should be correct. This is necessary in connection to properties [ICS3-Proto-2-ConnectionIntegrity] and [ICS3-Proto-3-StateConsistency]. Correctness of proofs means that they pass verification (`verifyProof` function). - -Postconditions: - - The module executing this handler stores a `ConnectionEnd` (matching with the arguments `local` and `remote`) in state `TRYOPEN`. - - No output; may abort if preconditions not met. - - -###### ConnAckHandler - -Step 3.b. - -```golang -func ConnAckHandler( - local ConnectionParty, - remoteVersion String, - proofsHeight uint64, - remoteConnectionProof ConnProof, - remoteClientProof ClientProof) -{ - // Search the details of this connection in local store. - connectionEnd = getConnection(local.connectionIdentifier) - - // This verification helps guarantee uniqueness and integrity. - abortTransactionUnless(connectionEnd != nil && connectionEnd.local == local) - - // Should not overwrite a connection that is already `OPEN`. - abortTransactionUnless(connectionEnd.state == INIT || connectionEnd.state == TRYOPEN) - - // Verify proofs. 
- expectedRemoteConnection = newConnectionEnd('TRYOPEN', remote, local, remoteVersion) - abortTransactionUnless(verifyProofs( - expectedRemoteConnection, proofsHeight, remoteConnectionProof, remoteClientProof) - - // Termination (partial -- only at this end) - connectionEnd.state = OPEN - connectionEnd.version = remoteVersion - - // Update the connectionEnd in local state. - setConnection(local.connectionIdentifier, connectionEnd) -} -``` - -Preconditions: - - The module executing this handler already has a `ConnectionEnd` stored locally, matching the `local` argument, and in state `INIT` or `TRYOPEN`. - - The input parameters are valid (see [validation](#Validation)). - - The two proofs `remoteConnectionProof` and `remoteClientProof` are correct; for guaranteeing properties [ICS3-Proto-2-ConnectionIntegrity] and [ICS3-Proto-3-StateConsistency]. - -Postconditions: - - The module executing this handler stores a `ConnectionEnd` (matching with the arguments `local`) in state `OPEN`. - - No output; may abort if preconditions not met. - - -###### ConnConfirmHandler - -Step 4.b. - -```golang -func ConnConfirmHandler( - local ConnectionParty, - proofsHeight uint64, - remoteConnectionProof ConnProof) -{ - // Search the details of this connection in the local store. - connectionEnd = getConnection(local.connectionIdentifier) - - // Helps guarantee integrity and uniqueness. - abortTransactionUnless(connectionEnd != nil && connectionEnd.local == local) - - // Integrity check: the handler should not overwrite a connection that is - // already OPEN. And should not be working with a connection that is - // INIT. The only acceptable state is TRYOPEN. - abortTransactionUnless(connectionEnd.state == TRYOPEN) - - // Connection proof verification. - expectedRemoteConnection = newConnectionEnd('OPEN', remote, local, connectionEnd.version) - abortTransactionUnless(verifyProofs( - expectedRemoteConnection, proofsHeight, remoteConnectionProof, nil) - - // Termination. 
- connectionEnd.state = OPEN - setConnection(local.connectionIdentifier, connectionEnd) -} -``` - -Preconditions: - - The module executing this handler has a `ConnectionEnd` stored locally, (matching the input argument `local`) and in state `TRYOPEN`. - - The input parameters are valid (see [validation](#Validation)). - - The input proof `remoteConnectionProof` is correct; for guaranteeing property [ICS3-Proto-2-ConnectionIntegrity]. - -Postconditions: - - The module executing this handler stores a `ConnectionEnd` (matching with the input argument `local`) in state `OPEN`. - - No output; may abort if preconditions not met. - - -##### Datagrams - -A correct relayer can push the following datagrams at a chain. -Upon popping a datagram of a certain type, e.g., `ConnOpenConfirm`, a correct chain handles the datagram by first validating the datagram (syntactic validation, e.g., well-formed parameters, see [validation](#validation)) and then invoking the corresponding handler. - -```golang -type ConnOpenInit struct { - local ConnectionParty - remote ConnectionParty -} - -type ConnOpenTry struct { - local ConnectionParty - remote ConnectionParty - remoteVersions []String - proofsHeight uint64 - remoteConnectionProof ConnProof - remoteClientProof ClientProof -} - -type ConnOpenAck struct { - local ConnectionParty - remoteVersion String - proofsHeight uint64 - remoteConnectionProof ConnProof - remoteClientProof ClientProof -} - -type ConnOpenConfirm struct { - local ConnectionParty - proofsHeight uint64 - remoteConnectionProof ConnProof -} -``` - -Notice that the fields in each datagram match with the corresponding protocol handler signature. - -#### Helper functions & application-level predicates - -###### `matchingEnds(endLocal, endRemote)` - -Checks that the connection ends in the local and remote arguments match. -(NB: This does not check the connection state, only the connection parties). 
-
-###### Validation
-When we talk of validation we usually mean a verification that is done before a handler executes, checking that the input parameters of that handler are syntactically correct, e.g., valid `local` connection party, non-null proofs, non-zero proofs height.
-With respect to the `local` parameter, similar in functionality with `validateConnectionIdentifier` from [ICS 003](#references), we also validate the `local.connectionIdentifier` field.
-Additionally, if a `version` or `remoteVersion` input parameter is present, this should also be validated prior to invoking a handler with that version.
-
-###### `pickVersion(counterpartyVersions)`
-This is identical with the one in [ICS 003](#references).
-
-###### `getCompatibleVersions()`
-This is identical with the one in [ICS 003](#references).
-
-###### Proof Verification
-
-This function handles everything related to proof verification.
-It requires four arguments: the connection end as it is expected to be on the remote party, the height where the proofs were taken, a connection proof, and a client proof; the last argument may be nil.
-This function invokes a lower-level method `client.verifyProof`, which executes from a client context and handles the cryptographic verification of a given proof; this method is analogous to `verifyMembership` of the [ICS 023](#references), except it must additionally get the commitment root from the consensus state and perform serialization on the expected data.
-The pseudocode for `verifyProofs` is provided below.
-
-```golang
-func verifyProofs(
- expectedConnection ConnectionEnd,
- proofsHeight,
- remoteConnectionProof ConnProof,
- remoteClientProof ClientProof)
-{
- // Proofs verification.
- // Generic proof check: Local client should exist and be updated with the
- // height which the two proofs target (proofsHeight).
- client = clientState(expectedConnection.remote.clientIdentifier) - abortTransactionUnless(client != nil) - clientConsensusState = clientConsensusState(expectedConnection.remote.clientIdentifier), proofsHeight) - abortTransactionUnless(clientConsensusState != nil) - - // Connection proof specific verification. - abortTransactionUnless(client.verifyProof( - clientConsensusState, remoteConnectionProof, - connectionPath(local.connectionIdentifier), expectedConnection)) - - if remoteClientProof == nil { - return // Client proof is missing, so skip any further verification. - } - - // Client proof verification ensures state consistency: the remote - // client should be consistent with the state of local party. - expectedRemoteClientConsensus = localConsensusState(remoteClientProof.height) - abortTransactionUnless(remoteClientProof.height <= getCurrentHeight()) - abortTransactionUnless(client.verifyProof( - clientConsensusState, remoteClientProof, - clientPath(local.clientIdentifier, remoteClientProof.height), - expectedRemoteClientConsensus)) -} -``` - -###### `localConsensusState` and `clientConsensusState` - -These are analogous to `getConsensusState` and `queryClient` functions. -The `clientConsensusState`, in particular, has an implicit requirement that it may not return the consensus state of a client if the height is older than a certain threshold; the `proofsHeight` should not be outdated by more than `100` heights, specifically. (This means that proofs must not be too old.) - - -### Open questions: - -- Standard cryptographic assumptions (collision-resistant hash functions, public-key signatures). We should mention these once we have more details about the implementation. - -- How to capture aborts or incorrect termination? See [issue raised by Anca](https://github.com/informalsystems/ibc-rs/pull/42#discussion_r397077901). Is this necessary? To inform this discussion, more implementation details are needed. 
-
-- Verification of the unbonding period in `ConnTryHandler`. See [ICS/#403](https://github.com/cosmos/ibc/issues/403).
-
-- Missing link to L3: what is the mechanism that implements the `pop` functionality at the implementation/L3 level (hint: it's in the SDK, the layer sitting between the consensus module and IBC Handler).
-
-## References
-
-- [ICS 003] Interchain Standards [ICS 003 Connection Semantics](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics).
-- [ICS 024] Interchain Standards [ICS 024 Host Requirements](https://github.com/cosmos/ibc/tree/master/spec/core/ics-024-host-requirements).
-- [ICS 018] Interchain Standards [ICS 018 Relayer Algorithms](https://github.com/cosmos/ibc/tree/master/spec/relayer/ics-018-relayer-algorithms). diff --git a/docs/spec/connection-handshake/L2-tla/Environment.tla b/docs/spec/connection-handshake/L2-tla/Environment.tla deleted file mode 100644 index ed05bea075..0000000000 --- a/docs/spec/connection-handshake/L2-tla/Environment.tla +++ /dev/null @@ -1,406 +0,0 @@ ----------------------------- MODULE Environment ----------------------------
-
-(***************************************************************************
-
- This module is part of the TLA+ specification for the IBC Connection
- Handshake protocol (identifier 'ICS3'). This is a high-level spec of ICS3.
-
- This module captures the operators and actions outside of the ICS3 protocol
- itself (i.e., the environment).
- Among others, the environment does the following: - - creates two instances of ICS3Module; - - wires these instances together; - - provides the initialization step for ICS3 protocol, concretely a - "ICS3MsgInit" message, so that the two instances can perform the protocol; - - some relayer functionality: passes any outgoing message from a chain - into the ingoing buffer of the other (destination) chain and correspondingly - updates the client of the destination chain; - - also, advances the chain of each instance non-deterministically; - - if `Concurrency` is TRUE, then this module can take non-deterministic - steps, by updating the client on a chain. - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences, ICS3Utils - - -CONSTANT MaxHeight, \* Maximum height of any chain in the system. - MaxBufLen, \* Length (size) of message buffers. - Concurrency, \* Flag for enabling concurrent relayers. - MaxVersionNr, \* Maximum version number. - VersionPickMode \* The mode for picking versions. - - -ASSUME MaxHeight > 4 -ASSUME MaxBufLen >= 1 -ASSUME VersionPickMode \in {"overwrite", "onTryDet", "onTryNonDet", "onAckDet", "onAckNonDet"} - -(* -VersionPickMode: - * "overwrite" -- the version is picked deterministically when handling - ICS3MsgTry from the intersection of versions sent in the - message and locally supported versions. The picked version - is sent to the counterparty chain in ICS3MsgAck, which overwrites its - own version with the one from the message - * "onTryDet" -- the version is picked deterministically when handling - ICS3MsgTry from the intersection of versions sent in the - message and locally supported versions. 
The picked version - is sent to the counterparty chain in ICS3MsgAck, which accepts it - * "onTryNonDet" -- same as "onTryDet", except the version is picked - non-deterministically - * "onAckDet" -- the version is picked deterministically when handling - ICS3MsgAck from the intersection of versions sent in the - message and locally supported versions. The picked version - is sent to the counterparty chain in ICS3MsgConfirm, which accepts it - * "onAckNonDet" -- same as "onAckDet", except the version is picked - non-deterministically -*) - -VARIABLES - inBufChainA, \* A buffer (sequence) for messages inbound to chain A. - inBufChainB, \* A buffer for messages inbound to chain B. - outBufChainA, \* A buffer for messages outgoing from chain A. - outBufChainB, \* A buffer for messages outgoing from chain B. - storeChainA, \* The local store of chain A. - storeChainB \* The local store of chain B. - -(************* ChainAConnectionEnds & ChainBConnectionEnds ***************** - - The set of records that each chain can use as a valid local connection - end. For each chain, this set contains one record, since we are - modeling a single connection in this specification. 
- - ***************************************************************************) - -AllChainIDs == - { "chainA", "chainB" } - -AllVersionSeqs == - {<<>>} \union - {<> : a \in 1..MaxVersionNr} \union - {<> \in (1..MaxVersionNr) \X (1..MaxVersionNr) : a /= b} - -ChainAConnectionEnds == - [ - connectionID : { "connAtoB" }, - clientID : { "clientOnAToB" } - ] -ChainBConnectionEnds == - [ - connectionID : { "connBtoA" }, - clientID : { "clientOnBToA" } - ] - -AllConnectionEnds == - ChainAConnectionEnds \union ChainBConnectionEnds - -AllClientIDs == - { x.clientID : x \in AllConnectionEnds } - -AllConnectionIDs == - { x.connectionID : x \in AllConnectionEnds } - -ChainAClientIDs == - { x.clientID : x \in ChainAConnectionEnds } - -ChainBClientIDs == - { x.clientID : x \in ChainBConnectionEnds } - -ChainAConnectionIDs == - { x.connectionID : x \in ChainAConnectionEnds } - -ChainBConnectionIDs == - { x.connectionID : x \in ChainBConnectionEnds } - -(* Bundle with variables that chain A has access to. *) -chainAVars == <> (* The local chain store. *) - -(* Bundle with variables that chain B has access to. *) -chainBVars == <> (* Local chain store. *) - -(* All variables specific to both chains. *) -chainStoreVars == <> - -allVars == <> - - -(* This is a separate module comprising common type definitions. *) -INSTANCE ICS3Types - -chmA == INSTANCE ICS3Module - WITH MaxChainHeight <- MaxHeight, - inBuf <- inBufChainA, - outBuf <- outBufChainA, - store <- storeChainA, - ConnectionIDs <- ChainAConnectionIDs, - ClientIDs <- ChainAClientIDs, - ChainID <- "chainA" - - -chmB == INSTANCE ICS3Module - WITH MaxChainHeight <- MaxHeight, - inBuf <- inBufChainB, - outBuf <- outBufChainB, - store <- storeChainB, - ConnectionIDs <- ChainBConnectionIDs, - ClientIDs <- ChainBClientIDs, - ChainID <- "chainB" - - -(*************************************************************************** - Environment actions. 
- ***************************************************************************)
-
-
-(* Environment initialization.
-
- This action kick-starts the ICS3 protocol by assigning an ICS3MsgInit
- msg to either of the two chains (or both).
-
- *)
-InitEnv ==
- /\ \/ /\ inBufChainA \in {<> : (* ICS3MsgInit to chain A. *)
- msg \in InitMsgs(ChainAConnectionEnds, ChainBConnectionEnds)}
- /\ inBufChainB = <<>>
- \/ /\ inBufChainB \in {<> : (* ICS3MsgInit to chain B. *)
- msg \in InitMsgs(ChainBConnectionEnds, ChainAConnectionEnds)}
- /\ inBufChainA = <<>>
- \/ /\ inBufChainA \in {<> : (* ICS3MsgInit to both chains. *)
- msg \in InitMsgs(ChainAConnectionEnds, ChainBConnectionEnds)}
- /\ inBufChainB \in {<> :
- msg \in InitMsgs(ChainBConnectionEnds, ChainAConnectionEnds)}
- /\ outBufChainA = <<>> (* Output buffers should be empty initially. *)
- /\ outBufChainB = <<>>
-
-
-(* Message relaying functionality of the environment.
-
- This is part of the RelayNextEnv sub-action of the environment.
- This performs a basic relaying step, that is, passing a message from the
- output buffer of one of the chains (parameter 'from') into the input buffer
- of another chain (parameter 'to').
-
- *)
-RelayMessage(from, to) ==
- /\ from # <<>>
- /\ Len(to) < MaxBufLen - 1
- /\ to' = Append(to, Head(from))
- /\ from' = Tail(from)
-
-
-(* Default next step for environment.
-
- This step may change (non-deterministically) either of the store of chain A
- or B, by advancing the height of that chain. This can only enable if the
- respective chain has ample steps left, i.e., the chain height is not within 4 steps
- of the maximum height. This precondition disallows continuous advancing of chain heights,
- and therefore allows chains to take meaningful steps (executing the ICS3 protocol to
- completion).
- - *) -DefaultNextEnv == - \/ /\ MaxHeight - storeChainA.latestHeight > 4 - /\ chmA!AdvanceChainHeight - /\ UNCHANGED<> - \/ /\ MaxHeight - storeChainB.latestHeight > 4 - /\ chmB!AdvanceChainHeight - /\ UNCHANGED<> - - -(* A concurrent UpdateClient step for the environment. - - This updates the client on one of the chains with the latest height of the other chain. - This step helps to simulate the conditions of having multiple relayers acting in parallel. - -*) -ConcurrentUpdateClient == - \/ /\ chmB!CanUpdateClient(storeChainA.latestHeight) - /\ chmB!UpdateClient(storeChainA.latestHeight) - /\ UNCHANGED<> - \/ /\ chmA!CanUpdateClient(storeChainB.latestHeight) - /\ chmA!UpdateClient(storeChainB.latestHeight) - /\ UNCHANGED<> - - -(* Relaying step for the environment. - - This step performs a relay: moving a message between the output - buffer of a chain to the input buffer of the other chain, and updating accordingly - the client on the latter chain. - - *) -RelayNextEnv == - (* Relay direction: from chain A to chain B. *) - \/ LET msg == Head(outBufChainA) - targetHeight == IF MessageTypeIncludesConnProof(msg.type) - THEN msg.proofHeight - ELSE storeChainA.latestHeight - IN /\ RelayMessage(outBufChainA, inBufChainB) - /\ \/ chmB!CanUpdateClient(targetHeight) - /\ chmB!UpdateClient(targetHeight) - \/ ~ chmB!CanUpdateClient(targetHeight) - /\ UNCHANGED storeChainB - /\ UNCHANGED<> - (* Relay direction: from chain B to chain A. *) - \/ LET msg == Head(outBufChainB) - targetHeight == IF MessageTypeIncludesConnProof(msg.type) - THEN msg.proofHeight - ELSE storeChainB.latestHeight - IN /\ RelayMessage(outBufChainB, inBufChainA) - /\ \/ chmA!CanUpdateClient(targetHeight) - /\ chmA!UpdateClient(targetHeight) - \/ ~ chmA!CanUpdateClient(targetHeight) - /\ UNCHANGED storeChainA - /\ UNCHANGED<> - - -(* Environment next action. - - There are three possible actions that the environment may perform: - - 1. 
If `Concurrency` flag is TRUE, then the environment may update the - client on one of the two chains. This effectively models what happens - when more than a relayer triggers the `UpdateClient` action of a chain, - a condition that can lead to liveness (termination) problems in ICS3. - - 2. A 'DefaultNextEnv' step, that simply advances the height of one of - the chains unless the chain has just a few (namely, `4`) heights left. - - 3. The environment may perform a relaying step, that is: - if there is a message in the ougoing buffer of a chain, the relayer - moves this message to the ingoing buffer of the other chain, and also - updates the client on the latter chain. - - *) -NextEnv == - \/ Concurrency /\ ConcurrentUpdateClient - \/ DefaultNextEnv - \/ RelayNextEnv - \/ UNCHANGED allVars - - -(* Enables when the connection is open on both chains. - - State predicate signaling that the protocol terminated correctly. - - *) -ICS3ReachedOpenConnection == - /\ storeChainA.connection.state = "OPEN" - /\ storeChainB.connection.state = "OPEN" - /\ UNCHANGED allVars - - -(* Enables when both chains are stuck, i.e., unable to progress while - their connection is not opened. - - State predicate signaling that the protocol terminated unsucessfully. - - *) -ICS3ImpossibleToAdvance == - /\ \/ (~ chmA!CanAdvance /\ storeChainA.connection.state # "OPEN") - \/ (~ chmB!CanAdvance /\ storeChainB.connection.state # "OPEN") - /\ UNCHANGED allVars - - -(****************************************************************************** - - Main spec. The system comprises the environment plus the two instances of - ICS3 modules. - - *****************************************************************************) - - -(* Initializes both chains, attributing to each a chainID and a client. - The ChainVersionsOverlap predicate is a necessary assumption for termination. 
- *) -Init == - /\ chmA!Init - /\ chmB!Init - /\ ChainVersionsOverlap(storeChainA, storeChainB) - /\ InitEnv - - -(* The two ICS3 modules and the environment alternate their steps - non-deterministically. Eventually, the execution ends with either - successful (ICS3ReachedOpenConnection sub-action) or unsuccesfull - (ICS3ImpossibleToAdvance sub-action) termination. -*) -Next == - \/ ICS3ReachedOpenConnection - \/ ICS3ImpossibleToAdvance - \/ NextEnv - \/ chmA!Next /\ UNCHANGED chainBVars - \/ chmB!Next /\ UNCHANGED chainAVars - - -FairProgress == - /\ chmA!Fairness - /\ chmB!Fairness - /\ WF_<>(RelayNextEnv) - - -Spec == - /\ Init - /\ [][Next]_<> - /\ FairProgress - - -TypeInvariant == - /\ chmA!TypeInvariant - /\ chmB!TypeInvariant - - -(* Liveness property. - - We expect to eventually always reach an OPEN connection on both chains. - - Naturally, this property may not hold if the two chains do not have - sufficient number of heights they can advance to. In other words, the - `MaxHeight` constant should be at least `4` for this property to hold. - - The `Concurrency` constant may also affect liveness. -*) -Termination == - <> [](/\ \/ storeChainA.connection.state = "OPEN" - \/ storeChainA.latestHeight = MaxHeight - /\ \/ storeChainB.connection.state = "OPEN" - \/ storeChainB.latestHeight = MaxHeight) - -(* Safety property. - - If the connections in the two chains are not null, then the - connection parameters must always match. - *) -ConsistencyProperty == - /\ storeChainA.connection.state # "UNINIT" - /\ storeChainB.connection.state # "UNINIT" - => storeChainA.connection.parameters - = chmB!FlipConnectionParameters(storeChainB.connection.parameters) - - -Consistency == - [] ConsistencyProperty - -(* Complementary to the safety property above. - - If the connections in the two chains are both OPEN, then the - connection version must be identical. 
- *) -VersionInvariant == - /\ storeChainA.connection.state = "OPEN" - /\ storeChainB.connection.state = "OPEN" - => /\ Len(storeChainA.connection.version) = 1 - /\ Len(storeChainB.connection.version) = 1 - /\ storeChainA.connection.version = storeChainB.connection.version - -============================================================================= -\* Modification History -\* Last modified Fri Aug 28 09:11:35 CEST 2020 by adi -\* Last modified Tue Aug 25 17:48:37 CEST 2020 by ilinastoilkovska -\* Created Fri Apr 24 18:51:07 CEST 2020 by adi \ No newline at end of file diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Module.tla b/docs/spec/connection-handshake/L2-tla/ICS3Module.tla deleted file mode 100644 index 1716b4d8fb..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Module.tla +++ /dev/null @@ -1,580 +0,0 @@ --------------------------- MODULE ICS3Module ------------------------------ - -(*************************************************************************** - - This module is part of the TLA+ specification for the - IBC Connection Handshake protocol (ICS3). - - This module captures the actions and operators of the ICS3 protocol. - Typically, it is an IBC module running on a chain that would implement - the logic in this TLA+ module, hence the name "ICS3Module". - sometimes abbreviated to "chModule" or "chm". 
- - This module deals with a high-level spec of the ICS3 protocol, so it is - a simplification with respect to ICS3 proper in several regards: - - - the modules assumes to run on a chain which we model as a simple - advancing height, plus a few more critical fields (see the 'store'), - but without any state (e.g., blockchain, transactions, consensus core); - - - we model a single connection; establishing multiple connections is not - possible; - - - we do not perform any cryptographic proof verifications; - - - the abstractions we use are higher-level, and slightly different from - the ones in ICS3 (see e.g., ConnectionEnd and Connection records). - - - the client colocated with the module is simplified, comprising only - a set of heights (not the actual blockchain headers). - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences, ICS3Utils, ICS3Types - - -CONSTANTS MaxChainHeight, \* Maximum height of the local chain. - ConnectionIDs, \* The set of valid connection IDs. - ClientIDs, \* The set of valid client IDs. - MaxBufLen, \* Maximum length of the input and output buffers. - MaxVersionNr, \* Maximum version number - ChainID, \* The chainID - VersionPickMode \* the mode for picking versions - -ASSUME Cardinality(ConnectionIDs) >= 1 -ASSUME Cardinality(ClientIDs) >= 1 - - -VARIABLES -(******************************* Store ***************************** - - The store record of a chain contains the following fields: - - - chainID -- a string. - Stores the identifier of the chain where this module executes. - - - latestHeight -- a natural number in the range 1..MaxHeight. - Describes the current height of the chain. - - - connection -- a connection record. - Captures all the details of the connection on this chain. - For a full description of a connection record, see the - 'Environment.Connections' set. - - - client -- a client record. - Specifies the state of the client running on this chain. 
- - A client record contains the following fields: - - - consensusHeights -- a set of heights. - Stores the set of all heights (i.e., consensus states) that this - client observed. - - - clientID -- a string. - The identifier of the client. - - - latestHeight -- a natural number in the range 1..MaxHeight. - Stores the latest height among all the heights in consensusHeights. - - For more details on how clients are initialized, see the operator - ICS3Types.InitClients. - - ***************************************************************************) - store, - (* A buffer (Sequence) holding any message(s) incoming to this module. *) - inBuf, - (* A buffer (Sequence) holding outbound message(s) from this module. *) - outBuf - - -moduleVars == - <> - - -(*************************************************************************** - Helper operators. - ***************************************************************************) - - -(* Simple computation returning the maximum out of two numbers 'a' and 'b'. - *) -MAX(a, b) == - IF a > b THEN a ELSE b - -MAXSet(S) == - CHOOSE x \in S: \A y \in S: y <= x - - -(* Validates a connection parameter. - - Returns true if 'para' matches the parameters in the local connection, - and returns false otherwise. - - *) -ValidConnectionParameters(para) == - LET local == store.connection.parameters.localEnd - remote == store.connection.parameters.remoteEnd - IN /\ local.connectionID = para.localEnd.connectionID - /\ remote.connectionID = para.remoteEnd.connectionID - /\ local.clientID = para.localEnd.clientID - /\ remote.clientID = para.remoteEnd.clientID - - -(* Validates a connection parameter local end. - - Expects as input a ConnectionParameter 'para' and returns true or false. - This is a basic validation step, making sure that the local end in 'para' - is valid with respect to module-level constants ConnectionIDs and ClientIDs. 
- -*) -ValidLocalEnd(para) == - /\ para.localEnd.connectionID \in ConnectionIDs - /\ para.localEnd.clientID \in ClientIDs - -(* Operator for reversing the connection ends. - - Given a ConnectionParameters record 'para', returns a new set - of parameters where the local and remote ends are - flipped (i.e., reversed). - *) -FlipConnectionParameters(para) == - [localEnd |-> para.remoteEnd, - remoteEnd |-> para.localEnd] - - -(* Operator for constructing a connection proof. - - The connection proof is used to demonstrate to another chain that the - local store on this chain comprises a connection in a certain state. - *) -GetConnProof(myConnection) == - [connection |-> myConnection] - - -(* Operator for constructing a client proof. - *) -GetClientProof == - [latestHeight |-> store.client.latestHeight, - consensusHeights |-> store.client.consensusHeights] - - -(* Verification of a connection proof. - - This is a state predicate returning true if the following holds: - - the state of connection in this proof should match with input parameter - 'expectedState'; and - - the connection parameters in this proof should match with the flipped version - of the input 'expectedParams'. - - *) -VerifyConnProof(cp, expectedState, expectedParams) == - /\ cp.connection.state = expectedState - /\ cp.connection.parameters = FlipConnectionParameters(expectedParams) - - -(* Verification of a client proof. - - This is a state predicate returning true if the following holds: the height - reported in the client proof must not exceed the current (latestHeight) of - this chain. - *) -VerifyClientProof(cp) == - /\ cp.latestHeight <= store.latestHeight (* Consistency height check. *) - /\ cp.latestHeight \in cp.consensusHeights (* Client verification step. *) - - -(* Get all possible version sequences from a set of versions. 
- *) -VersionSetAsVersionSequences(S) == - LET E == 1..Cardinality(S) IN - LET AllSeqs == [E -> S] IN - {seq \in AllSeqs : seq \in AllVersionSeqs} - -(*************************************************************************** - Connection Handshake Module actions & operators. - ***************************************************************************) - - -(* Modifies the local store. - - Replaces the connection in the store with the argument 'newCon'. - This action also advances the chain height. - *) -NewStore(newCon) == - [store EXCEPT !.connection = newCon, - !.latestHeight = @ + 1] - - -(********************************** - ICS3 spec related to Init messages. - **********************************) - -(* State predicate, guarding the handler for the Init msg. - - If any of these preconditions does not hold, the message - is dropped. - *) -PreconditionsInitMsg(m) == - /\ ValidLocalEnd(m.parameters) (* Basic validation of localEnd in parameters. *) - /\ store.connection.state = "UNINIT" - -(* Reply message to an ICS3MsgInit message. - *) -MsgInitReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - myClientProof == GetClientProof - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - type |-> "ICS3MsgTry", - proofHeight |-> chainStore.latestHeight, - connProof |-> myConnProof, - clientProof |-> myClientProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgInit" message 'm'. - - Primes the store.connection to become initialized with the parameters - specified in 'm'. Also creates a reply message, enqueued on the outgoing - buffer. This reply message will include proofs that match the height of - this chain (i.e., current store.latestHeight + 1). 
- *) -HandleInitMsg(m) == - LET newCon == [parameters |-> m.parameters, - state |-> "INIT", - version |-> store.connection.version] - newStore == NewStore(newCon) IN - IF PreconditionsInitMsg(m) - THEN {newStore} - ELSE {store} - - -(********************************** - ICS3 spec related to Try messages. - **********************************) - -(* State predicate, guarding the handler for the Try msg. - - If any of these preconditions does not hold, the message - is dropped. - *) -PreconditionsTryMsg(m) == - /\ \/ /\ store.connection.state = "UNINIT" - /\ ValidLocalEnd(m.parameters) - \/ /\ store.connection.state = "INIT" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. *) - /\ VerifyConnProof(m.connProof, "INIT", m.parameters) - /\ VerifyClientProof(m.clientProof) - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgTry message - /\ VersionSequencesOverlap(store.connection.version, m.version) - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "onTryNonDet" or VersionPickMode = "overwrite" - -> pick a version from (m.version \intersect store.connection.version) non-deterministically, - send the picked version to counterparty in ICS3MsgAck - - if VersionPickMode = "onTryNonDet" - -> pick a version from (m.version \intersect store.connection.version) deterministically - (e.g., maximum), send the picked version to counterparty in ICS3MsgAck - - otherwise (version picking is done when handling ICS3MsgAck) - -> send the value of the intersection (m.version \intersect store.connection.version) - to counterparty in ICS3MsgConfirm -*) -PickVersionOnTry(m) == - \* get a set of feasible versions -- - \* the intersection between the local and the versions sent in the message - LET feasibleVersions == SequenceAsSet(m.version) - \intersect - SequenceAsSet(store.connection.version) IN - - IF feasibleVersions /= 
{} - THEN IF \/ VersionPickMode = "overwrite" - \/ VersionPickMode = "onTryNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in feasibleVersions} - ELSE IF VersionPickMode = "onTryDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version will be picked when handling ICS3MsgAck, - \* send a sequence which consists of elements in the - \* set feasibleVersions - ELSE VersionSetAsVersionSequences(feasibleVersions) - ELSE {} - -(* Reply message to an ICS3MsgTry message. - *) -MsgTryReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - myClientProof == GetClientProof - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - type |-> "ICS3MsgAck", - proofHeight |-> chainStore.latestHeight, - connProof |-> myConnProof, - clientProof |-> myClientProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgTry" message. - *) -HandleTryMsg(m) == - \* create a set of new connections, whose versions - \* were picked in OnTryPickVersion - LET newConnSet == [parameters : {m.parameters}, - state : {"TRYOPEN"}, - version : PickVersionOnTry(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsTryMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(********************************** - ICS3 spec related to Ack messages. - **********************************) - -(* State predicate, guarding the handler for the Ack msg. - *) -PreconditionsAckMsg(m) == - /\ \/ store.connection.state = "INIT" - \/ store.connection.state = "TRYOPEN" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. 
*) - /\ VerifyConnProof(m.connProof, "TRYOPEN", m.parameters) - /\ VerifyClientProof(m.clientProof) - /\ IF VersionPickMode /= "overwrite" - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgAck message if VersionPickMode /= "overwrite" - THEN VersionSequencesOverlap(store.connection.version, m.version) - \* if VersionPickMode = "overwrite", do not check for version overlap - ELSE TRUE - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "overwrite" - -> take the picked version from the message - - if VersionPickMode = "onAckNonDet" - -> pick a version from (m.version \intersect store.connection.version) non-deterministically, - send the picked version to counterparty in ICS3MsgConfirm - - if VersionPickMode = "onAckDet" - -> pick a version from (m.version \intersect store.connection.version) deterministically - (e.g., maximum), send the picked version to counterparty in ICS3MsgConfirm - - otherwise (version picking was done when handling ICS3MsgTry) - -> use m.version if (m.version \intersect store.connection.version) is not empty - (checked in PreconditionsAckMsg) - -*) -PickVersionOnAck(m) == - \* get a set of feasible versions -- - \* the intersection between the local and the versions sent in the message - LET feasibleVersions == SequenceAsSet(m.version) - \intersect - SequenceAsSet(store.connection.version) IN - - IF VersionPickMode = "overwrite" - \* take the picked version from the message - THEN {m.version} - ELSE IF feasibleVersions /= {} - THEN IF VersionPickMode = "onAckNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in feasibleVersions} - ELSE IF VersionPickMode = "onAckDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version was picked when handling ICS3MsgTry, - \* use the picked version from the ICS3MsgAck message - ELSE {m.version} - ELSE {} - -(* 
Reply message to an ICS3MsgAck message. - *) -MsgAckReply(chainStore) == - LET conn == chainStore.connection - myConnProof == GetConnProof(conn) - replyMsg == [parameters |-> FlipConnectionParameters(conn.parameters), - proofHeight |-> chainStore.latestHeight, - type |-> "ICS3MsgConfirm", - connProof |-> myConnProof, - version |-> conn.version] IN - replyMsg - -(* Handles a "ICS3MsgAck" message. - *) -HandleAckMsg(m) == - LET newConnSet == [parameters : {m.parameters}, - state : {"OPEN"}, - version : PickVersionOnAck(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsAckMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(************************************** - ICS3 spec related to Confirm messages. - **************************************) - -(* State predicate, guarding the handler for the Confirm msg. - *) -PreconditionsConfirmMsg(m) == - /\ store.connection.state = "TRYOPEN" - /\ ValidConnectionParameters(m.parameters) - /\ m.proofHeight \in store.client.consensusHeights (* Consistency height check. 
*) - /\ VerifyConnProof(m.connProof, "OPEN", m.parameters) - /\ IF VersionPickMode /= "overwrite" - \* check if the locally stored versions overlap with the versions sent in - \* the ICS3MsgConfirm message if VersionPickMode /= "overwrite" - THEN IF \/ VersionPickMode = "onAckNonDet" - \/ VersionPickMode = "onAckDet" - \* if the version was picked on handling ICS3MsgAck, check for intersection - THEN VersionSequencesOverlap(store.connection.version, m.version) - \* if the version was picked on handling ICS3MsgTry, check for equality - ELSE store.connection.version = m.version - \* if VersionPickMode = "overwrite", do not check for version overlap - ELSE TRUE - -(* Pick a version depending on the value of the constant VersionPickMode - - - if VersionPickMode = "overwrite" - -> take the picked version from the message - - if VersionPickMode = "onAckNonDet" - -> pick a version from store.connection.version non-deterministically - - if VersionPickMode = "onAckDet" - -> pick a version from store.connection.version deterministically - (e.g., maximum) - - otherwise - -> use store.connection.version if - * version picking was done when handling ICS3MsgAck and - (m.version \intersect store.connection.version) is not empty - * version picking was done when handling ICS3MsgTry and - m.version = store.connection.version - (both conditions checked in PreconditionsAckMsg) - -*) -PickVersionOnConfirm(m) == - IF VersionPickMode = "overwrite" - \* take the picked version from the message - THEN {m.version} - ELSE IF VersionPickMode = "onAckNonDet" - \* the version is picked non-deterministically - THEN {<> : newVersion \in SequenceAsSet(store.connection.version)} - ELSE IF VersionPickMode = "onAckDet" - \* the version is picked deterministically, - \* using MAXSet as a deterministic choice function - THEN {<>} - \* the version was picked when handling ICS3MsgTry, - \* use the picked version from the ICS3MsgAck message - ELSE {store.connection.version} - -(* Handles a 
"ICS3MsgConfirm" message. - *) -HandleConfirmMsg(m) == - LET newConnSet == [parameters : {m.parameters}, - state : {"OPEN"}, - version : PickVersionOnConfirm(m)] - newStoreSet == {NewStore(newConn) : newConn \in newConnSet} IN - - IF /\ PreconditionsConfirmMsg(m) - /\ newStoreSet /= {} - THEN newStoreSet - ELSE {store} - - -(* Action for advancing the current height (latestHeight) of the chain. - - The environment triggers this as part of the GoodNextEnv action. - *) -AdvanceChainHeight == - store' = [store EXCEPT !.latestHeight = @ + 1] - - -(* State predicate returning true if MaxChainHeight not yet attained. - *) -CanAdvance == - store.latestHeight < MaxChainHeight - - -(* Action for updating the local client on this chain with a new height. - - This primes the store; leaves the chain buffers unchanged. - This will also advance the chain height. - *) -UpdateClient(height) == - /\ store' = [store EXCEPT !.latestHeight = @ + 1, - !.client.consensusHeights = @ \cup {height}, - !.client.latestHeight = MAX(height, store.client.latestHeight)] - - -(* State predicate guarding the UpdateClient action. - - This requires client updates to be monotonic (prevents updates with older - heights). - *) -CanUpdateClient(newHeight) == - /\ CanAdvance - /\ newHeight > store.client.latestHeight - - -(* Generic action for handling any type of inbound message. - - Expects as parameter a message. - Takes care of priming the 'store' and adding any reply msg in 'outBuf'. - This action assumes the message type is valid, therefore one of the - disjunctions (in the CASE statements) will always enable. - *) -ProcessMsg == - /\ inBuf /= <<>> - /\ CanAdvance - /\ LET m == Head(inBuf) - resStores == CASE m.type = "ICS3MsgInit" -> HandleInitMsg(m) - [] m.type = "ICS3MsgTry" -> HandleTryMsg(m) - [] m.type = "ICS3MsgAck" -> HandleAckMsg(m) - [] m.type = "ICS3MsgConfirm" -> HandleConfirmMsg(m) IN - /\ store' \in resStores - /\ outBuf' = CASE m.type = "ICS3MsgInit" (* Get reply to the Init msg. 
*) - /\ store'.connection.state = "INIT" -> Append(outBuf, MsgInitReply(store')) - [] m.type = "ICS3MsgTry" (* Get reply to the Try msg. *) - /\ store'.connection.state = "TRYOPEN" -> Append(outBuf, MsgTryReply(store')) - [] m.type = "ICS3MsgAck" (* Get reply to the Ack msg. *) - /\ store'.connection.state = "OPEN" -> Append(outBuf, MsgAckReply(store')) - [] TRUE -> outBuf (* Default case: no reply necessary. *) - /\ inBuf' = Tail(inBuf) - - -(*************************************************************************** - Connection Handshake Module (ICS3) main spec. - ***************************************************************************) - -Init == - store \in [chainID : {ChainID}, - latestHeight : {1}, - connection : NullConnections, - client : InitClients(ClientIDs)] - -Next == - \/ ProcessMsg - \/ UNCHANGED moduleVars - -Fairness == - WF_moduleVars(ProcessMsg) - - -TypeInvariant == - /\ inBuf \in Seq(ConnectionHandshakeMessages) \union {<<>>} - /\ outBuf \in Seq(ConnectionHandshakeMessages) \union {<<>>} - /\ store \in Stores - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 27 16:00:21 CEST 2020 by adi -\* Last modified Wed Aug 26 17:05:35 CEST 2020 by ilinastoilkovska -\* Created Fri Apr 24 19:08:19 CEST 2020 by adi \ No newline at end of file diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Types.tla b/docs/spec/connection-handshake/L2-tla/ICS3Types.tla deleted file mode 100644 index fed0d0c045..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Types.tla +++ /dev/null @@ -1,364 +0,0 @@ ------------------------------ MODULE ICS3Types ----------------------------- - -(*************************************************************************** - - This module is part of the TLA+ high-level specification for the - IBC Connection Handshake protocol (ICS3). - - This module includes common domain definitions that other modules will - extend. 
- - ***************************************************************************) - -EXTENDS Naturals, Sequences - -CONSTANTS MaxHeight, - AllConnectionIDs, - AllClientIDs, - AllChainIDs, - AllVersionSeqs - - -(******************************* InitClients ******************************** - - A set of records describing the possible initial values for the - clients on a chain. - - A client record contains the following fields: - - - consensusHeights -- a set of heights - Stores the set of all heights (i.e., consensus states) that this - client observed. At initialization time, the client only observes - the first height, so the only possible value for this record is - {1}. - - - clientID -- a string - The identifier of the client. This is expected as a parameter, since - it is a chain-specific field at initialization time. - - - latestHeight -- a number representing a (consensus) height - Stores the latest height among all the heights in consensusHeights. - Initialized to 1. - - ***************************************************************************) -InitClients(specificClientIDs) == - [ - consensusHeights : {{1}}, - clientID : specificClientIDs, - latestHeight : {1} - ] - - -(***************************** InitMsgs *********************************** - - The set of ConnectionHandshakeMessage records where message type is - ICS3MsgInit. - - This operator returns the set of all initialization messages, such that - the local end is the set 'le', and the remote end is set 're'. - - ***************************************************************************) -InitMsgs(le, re) == - [ - type : {"ICS3MsgInit"}, - parameters : [ - localEnd : le, - remoteEnd : re - ] - ] - - -(***************************** ICS3MessageTypes **************************** - - The set of valid message types that the ICS3Module can - handle, e.g., as incoming or outgoing messages. 
- - In the low-level connection handshake protocol, the four messages have - types: ConnOpenInit, ConnOpenTry, ConnOpenAck, ConnOpenConfirm. - In this high-level specification, we choose slightly different names, to - make an explicit distinction to the low-level protocol. Message types - are as follows: - ICS3MsgInit, ICS3MsgTry, ICS3MsgAck, and ICS3MsgConfirm. - For a complete description of the message record, see - ConnectionHandshakeMessage below. - - ***************************************************************************) -ICS3MessageTypes == - { - "ICS3MsgInit", - "ICS3MsgTry", - "ICS3MsgAck", - "ICS3MsgConfirm" - } - - -(******************************* ICS3ConnectionStates ********************** - - The set of valid states that a connection can be in. - - ***************************************************************************) -ICS3ConnectionStates == - { - "UNINIT", - "INIT", - "TRYOPEN", - "OPEN" - } - - -NullClientID == - "NULLClientID" - -NullConnectionID == - "NULLConnectionID" - - -(******************************* NullConnectionEnd ************************* - - A special record defining an uninitialized connection end record. - - ***************************************************************************) -NullConnectionEnd == - [ - connectionID |-> NullConnectionID, - clientID |-> NullClientID - ] - - -(******************************* NullConnectionParameters ****************** - - A record defining the special null connection parameters record. - - ***************************************************************************) -NullConnectionParameters == - [ - localEnd |-> NullConnectionEnd, - remoteEnd |-> NullConnectionEnd - ] - - -(******************************* ConnectionEnds ***************************** - - A set of connection end records. - A connection end record contains the following fields: - - - connectionID -- a string - Stores the identifier of this connection, specific to a chain. 
- - - clientID -- a string - Stores the identifier of the client running on this chain. - - ***************************************************************************) -ConnectionEnds == - [ - connectionID : AllConnectionIDs, - clientID : AllClientIDs - ] - - -(******************************* ConnectionParameters ********************** - - A set of connection parameter records. - A connection parameter record contains the following fields: - - - localEnd -- a connection end - Specifies the local connection details (i.e., connection ID and - client ID). - - - remoteEnd -- a connection end - Specifies the remote connection details. - - ***************************************************************************) -ConnectionParameters == - [ - localEnd : ConnectionEnds, - remoteEnd : ConnectionEnds - ] - \union - { - NullConnectionParameters - } - - -(******************************* NullConnection **************************** - - Initially, the connection on both chains is uninitialized, defined as - this special record. - - ***************************************************************************) -NullConnections == [ - parameters : {NullConnectionParameters}, - state : {"UNINIT"}, - version : AllVersionSeqs \ {<<>>} -] - - -(******************************* Connections ******************************* - - The set of possible connection records. - A connection record contains the following fields: - - - parameters -- a connection parameters record - Specifies the local plus remote ends. - - - state -- a connection state (see ConnectionStates set). - - ***************************************************************************) -Connections == - [ - parameters : ConnectionParameters, - state : ICS3ConnectionStates, - version : AllVersionSeqs - ] - - -(******************************* ConnProof ********************************* - - A set of records describing the possible values for connection proofs. 
- - A connection proof record contains a single field: - - - connection -- a connection record - This is the connection (in the local store of a chain) at the moment - when the module created this proof. - - ***************************************************************************) -ConnProofs == - [ - connection : Connections - ] - - -(******************************* Heights *********************************** - - The set of all possible heights that a chain can assume throughout any - execution. - - ***************************************************************************) -Heights == - 1..MaxHeight - - -(******************************* ClientProofs ******************************* - - A set of records describing the possible values for client proofs. - - A client proof record contains two fields: - - - latestHeight -- a number representing a height - The current height (latestHeight) of the client (in the local store of a - chain) at the moment when the ICS3 module created this proof. - - - consensusHeights -- a set of heights - The set of heights of the client colocated with module which created - this proof. - - ***************************************************************************) -ClientProofs == - [ - latestHeight : Heights, - consensusHeights : SUBSET Heights - ] - - -(*********************** ConnectionHandshakeMessages *********************** - - The set of ConnectionHandshakeMessage records. - These are connection handshake specific messages that two chains exchange - while executing the ICS3 protocol. 
- - ***************************************************************************) -ConnectionHandshakeMessages == - [ - type : {"ICS3MsgInit"}, - parameters : ConnectionParameters - ] - \union - [ - type : {"ICS3MsgTry"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs, - clientProof : ClientProofs, - version : AllVersionSeqs - ] - \union - [ - type : {"ICS3MsgAck"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs, - clientProof : ClientProofs, - version : AllVersionSeqs - ] - \union - [ - type : {"ICS3MsgConfirm"}, - parameters : ConnectionParameters, - proofHeight : Heights, - connProof : ConnProofs - ] - - - -(********************** MessageTypeIncludesConnProof *********************** - - Operator that evaluates to true if the message type (input parameter - 'type') refers to a message that includes a connection proof. - - ***************************************************************************) -MessageTypeIncludesConnProof(type) == - type \in {"ICS3MsgTry", "ICS3MsgAck", "ICS3MsgConfirm"} - - -(******************************* Clients *********************************** - - A set of records describing all the possible values for the - clients on a chain. - - See client record description above (within the InitClients operator). - - ***************************************************************************) -Clients == - [ - consensusHeights : SUBSET Heights, - clientID : AllClientIDs \union { NullClientID }, - latestHeight : Heights - ] - -(******************************* Stores ************************************* - - The set of store records. - A store record represents the local storage of a chain. This record - contains the following fields: - - - chainID -- a string - Stores the identifier of the chain where this module executes. - - - latestHeight -- a number representing a height - Describes the current height of the chain. 
- - - connection -- a connection record - Captures all the details of the connection on this chain. - For a full description of a connection record, see the - 'Environment.Connections' set. - - - client -- a client record. - Specifies the state of the client running on this chain. - - ***************************************************************************) -Stores == - [ - chainID : AllChainIDs, - latestHeight : Heights, - connection : Connections \union NullConnections, - client : Clients - ] - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 20 14:14:03 CEST 2020 by ilinastoilkovska -\* Last modified Tue Jun 23 13:47:17 CEST 2020 by adi -\* Created Mon May 18 17:53:08 CEST 2020 by adi - diff --git a/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla b/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla deleted file mode 100644 index 98a52eb6d3..0000000000 --- a/docs/spec/connection-handshake/L2-tla/ICS3Utils.tla +++ /dev/null @@ -1,40 +0,0 @@ ------------------------------ MODULE ICS3Utils ----------------------------- - -(*************************************************************************** - - This module is part of the TLA+ high-level specification for the - IBC Connection Handshake protocol (ICS3). - - This module includes common action definitions that other modules need. - - ***************************************************************************) - -EXTENDS Naturals, FiniteSets, Sequences - - -(* Obtain a set from the given sequence. - *) -SequenceAsSet(seq) == - {seq[x] : x \in DOMAIN seq} - - -(* Checks if two version sequences overlap by taking the intersection of their - set representation. - *) -VersionSequencesOverlap(versionSeq1, versionSeq2) == - SequenceAsSet(versionSeq1) - \intersect - SequenceAsSet(versionSeq2) /= {} - - -(* Checks if the versions of the two chain stores overlap; a wrapper over the - base action 'VersionSequencesOverlap'. 
- *) -ChainVersionsOverlap(chainStore, otherChainStore) == - VersionSequencesOverlap(chainStore.connection.version, otherChainStore.connection.version) - - -============================================================================= -\* Modification History -\* Last modified Thu Aug 27 16:02:28 CEST 2020 by adi -\* Created Thu Aug 27 15:39:01 CEST 2020 by adi diff --git a/docs/spec/connection-handshake/L2-tla/README.md b/docs/spec/connection-handshake/L2-tla/README.md deleted file mode 100644 index fdb4b772ac..0000000000 --- a/docs/spec/connection-handshake/L2-tla/README.md +++ /dev/null @@ -1,64 +0,0 @@ -# IBC Connection Handshake (ICS3) TLA+ spec - - -This is a high-level TLA+ spec for the IBC Connection Handshake (ICS3) protocol. -The spec has four modules: - - - `Environment.tla` (main model lives here). - - `ICS3Module.tla` (the spec for the ICS3 module). - - `ICS3Types.tla` (common domain definitions). - - `ICS3Utils.tla` (common actions live here). - -To run this spec: - -1. add the modules in a new specification in the toolbox -2. specify values for constants `MaxHeight`, `MaxBufLen`, and `Concurrency`. -Two additional constants serve the version negotiation algorithm in the handshake: - - `MaxVersionNr` -- typical value `2`; or set to `1` to make version negotiation trivial; - - `VersionPickMode` -- typical value `"onAckDet"`; parametrizes the strategy for negotiating versions (see [below](#version-negotiation-modes)). - -Note the assumptions: - -``` -ASSUME MaxHeight > 4 -ASSUME MaxBufLen >= 1 -ASSUME VersionPickMode \in - {"overwrite", - "onTryDet", - "onTryNonDet", - "onAckDet", - "onAckNonDet"} -``` - -Typical values could be: `MaxHeight = 5` and `MaxBufLen = 2`. -The `Concurrency` flag enables/disables some non-determinsm of the environment, -specifically: - -- if TRUE, then the environment can non-deterministically update the light client of a chain. 
-This configuration simulates a liveness problem caused by the way relayers use `UpdateClient`, and will lead the model to stutter. -To be clear: the stuttering is not caused by a bug in the ICS3 protocol itself; this model simply captures the original faulty relayer algorithms surrounding the ICS3 protocol. -See more details in the [disclosure log](https://github.com/informalsystems/ibc-rs/pull/83). -- if FALSE, then the model should check correctly. - -3. add the invariant `ConsistencyInv` and `TypeInvariant` as well as the property (temporal formula) `Termination`. - -4. run the model checker. - -## Version negotiation modes - -We introduce different version picking modes, which are used to parameterize the way in which versions are picked during the connection handshake. That is, the constant `VersionPickMode` can take one of the following values: - - `overwrite` : a version is picked non-deterministically when handling `ICS3MsgTry`, local version gets overwritten with version(s) sent in datagrams; - - `onTryNonDet` : a version is picked non-deterministically when handling `ICS3MsgTry`, local version is chosen from intersection of local and datagram versions; - - `onTryDet` : a version is picked deterministically when handling `ICS3MsgTry`, local version is chosen from intersection of local and datagram versions; - - `onAckNonDet` : a version is picked non-deterministically when handling `ICS3MsgAck`, local version is chosen from intersection of local and datagram versions; - - `onAckDet` : a version is picked non-deterministically when handling `ICS3MsgAck`, local version is chosen from intersection of local and datagram versions. 
- - The table below details these modes: - -| Mode\Action | `HandleMsgTry(m)` | `HandleMsgAck(m)` | `HandleMsgConfirm(m)` | -|-------------|-----------------------------------------|-----------------------------|-----------------------------| -|`overwrite` | pick a version from `m.versions \intersect conn.versions` non-deterministically, send the picked version to counterparty in `ICS3MsgAck` | store `m.version` locally, send it to counterparty in `ICS3MsgConfirm` | store `m.version` locally | -|`onTryNonDet`| pick a version from `m.versions \intersect conn.versions` non-deterministically, send the picked version to counterparty in `ICS3MsgAck` | check if received version in `ICS3MsgAck` is in list of local versions, accept it if it is, send it to counterparty in `ICS3MsgConfirm` | check if received version is the same as one stored in connection end| -|`onTryDet` | pick a version from `m.versions \intersect conn.versions` deterministically (e.g. maximum), store & send the picked version to counterparty in `ICS3MsgAck` | check if received version in `ICS3MsgAck` is in list of local versions, accept & store it if it is, then send it to counterparty in `ICS3MsgConfirm` | check if received version is the same as one stored in connection end| -|`onAckNonDet`| send the value of `m.versions \intersect conn.versions` to counterparty in `ICS3MsgAck`, store the intersection locally | pick a version from `m.versions \intersect conn.versions` non-deterministically, send the intersection to counterparty in `ICS3MsgConfirm` | pick a version from `conn.versions` non-deterministically | -|`onAckDet`| send the value of the intersection `m.versions \intersect conn.versions` to counterparty in `ICS3MsgAck`, store the intersection locally | pick a version from `m.versions \intersect conn.versions` deterministically (e.g. maximum), send the intersection to counterparty in `ICS3MsgConfirm` | pick a version from `conn.versions` deterministically (e.g. 
maximum)| diff --git a/docs/spec/connection-handshake/README.md b/docs/spec/connection-handshake/README.md deleted file mode 100644 index 928fa1716b..0000000000 --- a/docs/spec/connection-handshake/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# IBC Connection Handshake Spec - -## Specification roadmap - -In this folder you will find a collection of documents representing English & TLA+ specifications for the IBC connection handshake problem [[ICS-003](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics)]. - -We currently cover two levels of abstraction of ICS2, in accordance with the [VDD workflow](https://github.com/informalsystems/VDD/blob/master/guide/guide.md): _level 1_ (abstract), _level 2_ (system model & distributed protocol). -Consequently, we break this work across the following documents: - -- [L1_2.md](./L1_2.md) covers the highest level of abstraction (level 1) and also includes an English spec of the system model and protocol (level 2); -- [L2-tla](./L2-tla/) is a directory with the TLA+ spec for level 2. diff --git a/docs/spec/relayer/Definitions.md b/docs/spec/relayer/Definitions.md deleted file mode 100644 index 5d8830a717..0000000000 --- a/docs/spec/relayer/Definitions.md +++ /dev/null @@ -1,292 +0,0 @@ -# Data structure and helper function definitions - -This document defines data types and helper functions used by the relayer logic. - -## Data Types - -### Chain - -Chain is a data structure that captures relayer's perspective of a given chain and contains all important -information that allows relayer to communicate with a chain. A provider is a Tendermint full node through -which a relayer read information about the given chain and submit transactions. A relayer maintains a list -of full nodes (*peerList*) as a current provider could be faulty, so it can be replaced by another full node. 
-For each chain a relayer is connected to, the relayer has a light client that provides the relayer -access to the trusted headers (used as part of data verification). - -```go -type Chain { - chainID string - clientID Identifier - peerList List> - provider Pair - lc LightClient -} -``` - -### Client state and consensus state - -```go -type ClientState { - chainID string - validatorSet List> - trustLevel Rational - trustingPeriod uint64 - unbondingPeriod uint64 - latestHeight Height - latestTimestamp uint64 - frozenHeight Maybe - upgradeCommitmentPrefix CommitmentPrefix - upgradeKey []byte - maxClockDrift uint64 - proofSpecs []ProofSpec -} -``` - -```go -type ConsensusState { - timestamp uint64 - validatorSet List> - commitmentRoot []byte -} -``` - -### Membership proof - -```go -type MembershipProof struct { - Height Height - Proof Proof -} -``` - -### Connection - -```go -type ConnectionEnd { - state ConnectionState - counterpartyConnectionIdentifier Identifier - counterpartyPrefix CommitmentPrefix - clientIdentifier Identifier - counterpartyClientIdentifier Identifier - version []string -} - -enum ConnectionState { - INIT, - TRYOPEN, - OPEN, -} -``` - -### Channel - -```go -type ChannelEnd { - state ChannelState - ordering ChannelOrder - counterpartyPortIdentifier Identifier - counterpartyChannelIdentifier Identifier - connectionHops [Identifier] - version string -} - -enum ChannelState { - INIT, - TRYOPEN, - OPEN, - CLOSED, -} - -enum ChannelOrder { - ORDERED, - UNORDERED, -} -``` - -```go -type Packet { - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - sourcePort Identifier - sourceChannel Identifier - destPort Identifier - destChannel Identifier - data []byte -} -``` - -```go -type PacketRecv { - packet Packet - proof CommitmentProof - proofHeight Height -} -``` - -```go -type PacketAcknowledgement { - packet Packet - acknowledgement byte[] - proof CommitmentProof - proofHeight Height -} -``` - -## Helper functions - -We assume the 
existence of the following helper functions: - -```go -// Returns channel end with a commitment proof. -GetChannel(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (ChannelEnd, CommitmentProof, Error) - -// Returns connection end with a commitment proof. -GetConnection(chain Chain, - connectionId Identifier, - proofHeight Height) (ConnectionEnd, CommitmentProof, Error) - - -// Returns client state with a commitment proof. -GetClientState(chain Chain, - clientId Identifier, - proofHeight Height) (ClientState, CommitmentProof, Error) - -// Returns consensus state with a commitment proof. -GetConsensusState(chain Chain, - clientId Identifier, - targetHeight Height, - proofHeight Height) (ConsensusState, CommitmentProof, Error) - - -// Returns packet commitment with a commitment proof. -GetPacketCommitment(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (bytes, CommitmentProof, Error) - -// Returns next recv sequence number with a commitment proof. -GetNextSequenceRecv(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (uint64, CommitmentProof, Error) - - -// Returns next recv sequence number with a commitment proof. -GetNextSequenceAck(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (uint64, CommitmentProof, Error) - - -// Returns packet acknowledgment with a commitment proof. -GetPacketAcknowledgement(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (bytes, CommitmentProof, Error) - - -// Returns packet receipt with a commitment proof. -GetPacketReceipt(chain Chain, - portId Identifier, - channelId Identifier, - sequence uint64, - proofHeight Height) (String, CommitmentProof, Error) - - -// Returns estimate of the consensus height on the given chain. -GetConsensusHeight(chain Chain) Height - -// Returns estimate of the current time on the given chain. 
-GetCurrentTimestamp(chainB) uint64 - -// Verify that the data is written at the given path using provided membership proof and the root hash. -VerifyMembership(rootHash []byte, - proofHeight Height, - proof MembershipProof, - path String, - data []byte) boolean - -// Create IBC datagram as part of processing event at chainA. -CreateDatagram(ev IBCEvent, - chainA Chain, - chainB Chain, - installedHeight Height) (IBCDatagram, Error) - -// Create UpdateClient datagrams from the list of signed headers -CreateUpdateClientDatagrams(shs []SignedHeader) IBCDatagram[] - -// Submit given datagram to a given chain -Submit(chain Chain, datagram IBCDatagram) Error - -// Return the correspondin chain for a given chainID -// We assume that the relayer maintains a map of known chainIDs and the corresponding chains. -GetChain(chainID String) Chain -``` - -For functions that return proof, if `error == nil`, then the returned value is being verified. -The value is being verified using the header's app hash that is provided by the corresponding light client. - -Helper functions listed above assume querying (parts of the) application state using Tendermint RPC. For example, -`GetChannel` relies on `QueryChannel`. RPC calls can fail if: - -- no response is received within some timeout or -- malformed response is received. - -In both cases, error handling logic should be defined by the caller. For example, in the former case, the caller might -retry sending the same request to a same provider (full node), while in the latter case the request might be sent to -some other provider node. Although these kinds of errors could be due to network infrastructure issues, it is normally -simpler to blame the provider (assume implicitly network is always correct and reliable). Therefore, correct provider -always respond timely with a correct response, while in case of errors we consider the provider node faulty, and then -we replace it with a different node. 
- -We assume the following error types: - -```golang -enum Error { - RETRY, // transient processing error (for example due to optimistic send); function can be retried later - DROP, // event has already been received by the destination chain so it should be dropped - BADPROVIDER, // provider does not reply timely or with a correct data; it normally leads to replacing provider - BADLIGHTCLIENT // light client does not reply timely or with a correct data -} -``` - -We now show the pseudocode for one of those functions: - -```go -func GetChannel(chain Chain, - portId Identifier, - channelId Identifier, - proofHeight Height) (ChannelEnd, CommitmentProof, Error) { - - // Query provable store exposed by the full node of chain. - // The path for the channel end is at channelEnds/ports/{portId}/channels/{channelId}". - // The channel and the membership proof returned is read at height proofHeight - 1. - channel, proof, error = QueryChannel(chain.provider, portId, channelId, proofHeight) - if error != nil { return (nil, nil, Error.BADPROVIDER) } - - header, error = GetHeader(chain.lc, proofHeight) // get header for height proofHeight using light client - if error != nil { return (nil, nil, Error.BADLIGHTCLIENT) } // return if light client can't provide header for the given height - - // verify membership of the channel at path channelEnds/ports/{portId}/channels/{channelId} using - // the root hash header.AppHash - if !VerifyMembership(header.AppHash, proofHeight, proof, channelPath(portId, channelId), channel) { - // membership check fails; therefore provider is faulty. Try to elect new provider - return (nil, nil, Error.BadProvider) - } - - return (channel, proof, nil) -} -``` - -If *LATEST_HEIGHT* is passed as a parameter, the data should be read (and the corresponding proof created) -at the most recent height. 
- - - - diff --git a/docs/spec/relayer/Packets.md b/docs/spec/relayer/Packets.md deleted file mode 100644 index b9e24633ef..0000000000 --- a/docs/spec/relayer/Packets.md +++ /dev/null @@ -1,196 +0,0 @@ -# IBC packet handling - -This document specifies datagram creation logic for packets. It is used by the relayer. - -## Packet related IBC events - -```go -type SendPacketEvent { - height Height - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - sourcePort Identifier - sourceChannel Identifier - destPort Identifier - destChannel Identifier - data []byte -} -``` - -```go -type WriteAcknowledgementEvent { - height Height - port Identifier - channel Identifier - sequence uint64 - timeoutHeight Height - timeoutTimestamp uint64 - data []byte - acknowledgement []byte -} -``` - -## Event handlers - -### SendPacketEvent handler - -Successful handling of *SendPacketEvent* leads to *PacketRecv* datagram creation. - -// NOTE: Stateful relayer might keep packet that are not acked in the state so the following logic -// can be a bit simpler. 
- -```golang -func CreateDatagram(ev SendPacketEvent, - chainA Chain, // source chain - chainB Chain, // destination chain - proofHeight Height) (PacketRecv, Error) { - - // Stage 1 - // Verify if packet is committed to chain A and it is still pending (commitment exists) - - packetCommitment, packetCommitmentProof, error = - GetPacketCommitment(chainA, ev.sourcePort, ev.sourceChannel, ev.sequence, proofHeight) - if error != nil { return (nil, error) } - - if packetCommitment == nil OR - packetCommitment != hash(concat(ev.data, ev.timeoutHeight, ev.timeoutTimestamp)) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // Stage 2 - // Execute checks IBC handler on chainB will execute - - channel, proof, error = GetChannel(chainB, ev.destPort, ev.destChannel, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if channel != nil AND - (channel.state == CLOSED OR - ev.sourcePort != channel.counterpartyPortIdentifier OR - ev.sourceChannel != channel.counterpartyChannelIdentifier) { return (nil, Error.DROP) } - - if channel == nil OR channel.state != OPEN { return (nil, Error.RETRY) } - // TODO: Maybe we shouldn't even enter handle loop for packets if the corresponding channel is not open! 
- - connectionId = channel.connectionHops[0] - connection, proof, error = GetConnection(chainB, connectionId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if connection == nil OR connection.state != OPEN { return (nil, Error.RETRY) } - - if ev.timeoutHeight != 0 AND GetConsensusHeight(chainB) >= ev.timeoutHeight { return (nil, Error.DROP) } - if ev.timeoutTimestamp != 0 AND GetCurrentTimestamp(chainB) >= ev.timeoutTimestamp { return (nil, Error.DROP) } - - // we now check if this packet is already received by the destination chain - if channel.ordering === ORDERED { - nextSequenceRecv, proof, error = GetNextSequenceRecv(chainB, ev.destPort, ev.destChannel, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if ev.sequence != nextSequenceRecv { return (nil, Error.DROP) } // packet has already been delivered by another relayer - - } else { - // Note that absence of receipt (packetReceipt == nil) is also proven also and we should be able to verify it. - packetReceipt, proof, error = - GetPacketReceipt(chainB, ev.destPort, ev.destChannel, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if packetReceipt != nil { return (nil, Error.DROP) } // packet has already been delivered by another relayer - } - - // Stage 3 - // Build datagram as all checks has passed - packet = Packet { - sequence: ev.sequence, - timeoutHeight: ev.timeoutHeight, - timeoutTimestamp: ev.timeoutTimestamp, - sourcePort: ev.sourcePort, - sourceChannel: ev.sourceChannel, - destPort: ev.destPort, - destChannel: ev.destChannel, - data: ev.data - } - - return (PacketRecv { packet, packetCommitmentProof, proofHeight }, nil) -} -``` - -### WriteAcknowledgementEvent handler - -Successful handling of *WriteAcknowledgementEvent* leads to *PacketAcknowledgement* datagram creation. 
- -```golang -func CreateDatagram(ev WriteAcknowledgementEvent, - chainA Chain, // source chain - chainB Chain, // destination chain - proofHeight Height) (PacketAcknowledgement, Error) { - - // Stage 1 - // Verify if acknowledment is committed to chain A and it is still pending - packetAck, PacketStateProof, error = - GetPacketAcknowledgement(chainA, ev.port, ev.channel, ev.sequence, proofHeight) - if error != nil { return (nil, error) } - - if packetAck == nil OR packetAck != hash(ev.acknowledgement) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // Stage 2 - // Execute checks IBC handler on chainB will execute - - // Fetch channelEnd from the chainA to be able to compute port and chain ids on destination chain - channelA, proof, error = GetChannel(chainA, ev.port, ev.channel, ev.height) - if error != nil { return (nil, error) } - - channelB, proof, error = - GetChannel(chainB, channelA.counterpartyPortIdentifier, channelA.counterpartyChannelIdentifier, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if channelB == nil OR channel.state != OPEN { (nil, Error.DROP) } - // Note that we checked implicitly above that counterparty identifiers match each other - - connectionId = channelB.connectionHops[0] - connection, proof, error = GetConnection(chainB, connectionId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if connection == nil OR connection.state != OPEN { return (nil, Error.DROP) } - - // verify the packet is sent by chainB and hasn't been cleared out yet - packetCommitment, packetCommitmentProof, error = - GetPacketCommitment(chainB, channelA.counterpartyPortIdentifier, - channelA.counterpartyChannelIdentifier, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if packetCommitment == nil OR - packetCommitment != hash(concat(ev.data, ev.timeoutHeight, ev.timeoutTimestamp)) { - // invalid event; bad provider - return (nil, Error.BADPROVIDER) - } - - // abort transaction unless 
acknowledgement is processed in order - if channelB.ordering === ORDERED { - nextSequenceAck, proof, error = - GetNextSequenceAck(chainB, channelA.counterpartyPortIdentifier, - channelA.counterpartyChannelIdentifier, ev.sequence, LATEST_HEIGHT) - if error != nil { return (nil, error) } - - if ev.sequence != nextSequenceAck { return (nil, Error.DROP) } - } - - // Stage 3 - // Build datagram as all checks has passed - packet = Packet { - sequence: ev.sequence, - timeoutHeight: ev.timeoutHeight, - timeoutTimestamp: ev.timeoutTimestamp, - sourcePort: channelA.counterpartyPortIdentifier, - sourceChannel: channelA.counterpartyChannelIdentifier, - destPort: ev.port, - destChannel: ev.channel, - data: ev.data - } - - return (PacketAcknowledgement { packet, ev.acknowledgement, PacketStateProof, proofHeight }, nil) -} -``` diff --git a/docs/spec/relayer/Relayer.md b/docs/spec/relayer/Relayer.md deleted file mode 100644 index 4158c38dfe..0000000000 --- a/docs/spec/relayer/Relayer.md +++ /dev/null @@ -1,200 +0,0 @@ -# Relayer Specification - -Relayers are processes that provide connection layer of the IBC protocol. In the IBC protocol, on chain -modules do not have a way of directly sending a message to each other; this is the responsibility of relayer -processes. Modules signal its intention to send a message by writing data in its data store at the -defined location, and make those data (with corresponding proofs) available to external parties. -Relayer processes read (we say also scan) the state of each chain, construct appropriate IBC datagrams, -verify the corresponding proofs and submit valid datagrams to destination chain. -We assume existence of multiple relayers, where some relayers could be faulty (behave arbitrarily), -but there is always at least a single correct relayer. We don't make assumptions on the maximum number of -faulty relayers. - -For the purpose of this specification we assume existence of two on chain modules A and B, that executes -IBC protocol. 
We say that a module A (or B) sends an IBC datagram m to a module B (or A) when a correct -relayer can construct valid datagram m by scanning the state of the chain A. We say that a module A receives -an IBC datagram m, when m was processed by the module A on chain. We assume that modules -are correct. - -Correct relayers need to ensure the following properties: - -**[ICS18-Delivery]**: If a module A sends an IBC datagram m to a module B, then m is -eventually received by the module B. - -**[ICS18-Validity]**: If a module B receives an IBC datagram m from a module A, -then m was sent by the module A to the module B. - -## System model - -We assume that a correct relayer operates in the following model: - -### Connected chains - -Relayer transfers data between two chains: chainA and chainB. For simplicity, we assume Tendermint chains. -Each chain operates under Tendermint security model: -- given a block b at height h committed at time `t = b.Header.Time`, `+2/3` of voting power behaves correctly -at least before `t + UNBONDING_PERIOD`, where `UNBONDING_PERIOD` is a system parameter (typically order of weeks). -Validators sets can be changed in every block, and we don't assume any constraint on the way validators are changed -(application specific logic). - -Furthermore, we assume that blockchain applications that operate on top of chainA and chainB writes -relevant data into Merkleised data store (for example IBC packets), and that parts of the store are publicly -available (so relayers can access it). - -In order to access IBC relevant data, a relayer needs to establish connections with full nodes (correct) from -both chains. Note that there is no constrain on number of faulty full nodes: we can only assume that a correct relayer -will eventually have access to a correct full node. - -### Data availability - -Note that data written to a store at height *h* as part of executing block *b* (`b.Height = h`) is effectively committed by -the next block (at height h+1). 
The reason is the fact that the data store root hash as an effect of executing block at -height h is part of the block header at height h+1. Therefore, data read at height h is available until time -`t = b.Header.Time + UNBONDING_PERIOD`, where `b.Header.Height = h+1`. After time *t* we cannot trust that data anymore. -Note that data present in the store are re-validated by each new block: data added/modified at block *h* are still -valid even if not altered after, as they are still "covered" by the root hash of the store. - -Therefore UNBONDING_PERIOD gives absolute time bound during which relayer needs to transfer data read at source chain -to the destination chain. As we will explain below, due to fork detection and accountability protocols, the effective -data availability period will be shorter than UNBONDING_PERIOD. - -### Data verification - -As connected chains in IBC do not blindly trust each other, data coming from the opposite chain must be verified at -the destination before being acted upon. Data verification in IBC is implemented by relying on the concept of light client. -Light client is a process that by relying on an initial trusted header (subjective initialisation), verifies and maintains -set of trusted headers. Note that a light client does not maintain full blockchain and does not execute (verify) application -transitions. It operates by relying on the Tendermint security model, and by applying header verification logic that operates -only on signed headers (header + corresponding commit). - -More details about light client assumptions and protocols can be found -[here](https://github.com/tendermint/spec/tree/master/rust-spec/lightclient). For the purpose of this document, we assume -that a relayer has access to the light client node that provides trusted headers. 
-Given a data d read at a given path at height h with a proof p, we assume existence of a function -`VerifyMembership(header.AppHash, h, proof, path, d)` that returns `true` if data was committed by the corresponding -chain at height *h*. The trusted header is provided by the corresponding light client. - -## Relayer algorithm - -The main relayer event loop is a pipeline of four stages. Assuming some IBC event at height `h` on `chainA`, -the relayer: - -1. Determines destination chain (`chainB`) -2. Updates (on `chainB`) the IBC client for `chainA` to a certain height `H` where `H >= h+1`. -3. Creates IBC datagram at height `H-1`. -4. Submits the datagram from stage (2) to `chainB`. - -Note that an IBC event at height `h` corresponds to the modifications to the data store made as part of executing -block at height `h`. The corresponding proof (that data is indeed written to the data store) can be verified using -the data store root hash that is part of the header at height `h+1`. - -Once stage 2 finishes correctly, stage 3 should succeed assuming that `chainB` has not already processed the event. The -interface between stage 2 and stage 3 is just the height `H`. Once stage 3 finishes correctly, stage 4 should -succeed. The interface between stage 3 and stage 4 is an IBC datagram. - -We assume that the corresponding light client is correctly installed on each chain. - -Data structures and helper function definitions are provided -[here](https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/relayer/Definitions.md). - -```golang -func handleEvent(ev, chainA) Error { - // NOTE: we don't verify if event data are valid at this point. We trust full node we are connected to - // until some verification fails. - - // Stage 1. - // Determine destination chain - chainB, error = getDestinationInfo(ev, chainA) - if error != nil { return error } - - // Stage 2. - // Update on `chainB` the IBC client for `chainA` to height `>= targetHeight`. 
- targetHeight = ev.height + 1 - // See the code for `updateIBCClient` below. - proofHeight, error := updateIBCClient(chainB, chainA, targetHeight) - if error != nil { return error } - - // Stage 3. - // Create the IBC datagrams including `ev` & verify them. - datagram, error = CreateDatagram(ev, chainA, chainB, proofHeight) - if error != nil { return error } - - // Stage 4. - // Submit datagrams. - error = Submit(chainB, datagram) - if error != nil { return error } -} - -func getDestinationInfo(ev IBCEvent, chain Chain) (Chain, Error) { - switch ev.type { - case SendPacketEvent: - chainId, error = getChainId(chain, ev.sourcePort, ev.sourceChannel, ev.Height) - if error != nil { return (nil, error) } - - chain = GetChain(chainId) - if chain == nil { return (nil, Error.DROP) } - - return (chain, nil) - - case WriteAcknowledgementEvent: - chainId, error = getChainId(chain, ev.Port, ev.Channel, ev.Height) - if error != nil { return (nil, error) } - - chain = GetChain(chainId) - if chain == nil { nil, Error.DROP } - - return (chain, nil) - } -} - -// Return chaindId of the destination chain based on port and channel info for the given chain -func getChainId(chain Chain, port Identifier, channel Identifier, height Height) (String, Error) { - channel, proof, error = GetChannel(chain, port, channel, height) - if error != nil { return (nil, error) } - - connectionId = channel.connectionHops[0] - connection, proof, error = GetConnection(chain, connectionId, height) - if error != nil { return (nil, error) } - - clientState, proof, error = GetClientState(chain, connection.clientIdentifier, height) - if error != nil { return (nil, error) } - - return (clientState.chainID, error) -} - -// Perform an update on `dest` chain for the IBC client for `src` chain. 
-// Preconditions: -// - `src` chain has height greater or equal to `targetHeight` -// Postconditions: -// - returns the installedHeight >= targetHeight -// - return error if some of verification steps fail -func updateIBCClient(dest Chain, src Chain, targetHeight Height) -> (Height, Error) { - - clientState, proof, error = GetClientState(dest, dest.clientId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - // NOTE: What if a full node we are connected to send us stale (but correct) information regarding targetHeight? - - // if installed height is smaller than the targetHeight, we need to update client with targetHeight - while (clientState.latestHeight < targetHeight) { - // Do an update to IBC client for `src` on `dest`. - shs, error = src.lc.getMinimalSet(clientState.latestHeight, targetHeight) - if error != nil { return (nil, error) } - - error = dest.submit(createUpdateClientDatagrams(shs)) - if error != nil { return (nil, error) } - - clientState, proof, error = GetClientState(dest, dest.clientId, LATEST_HEIGHT) - if error != nil { return (nil, error) } - } - - // NOTE: semantic check of the installed header is done using fork detection component - return { clientState.Height, nil } -} -``` - - - - - - - diff --git a/docs/spec/tla/client/Chain.tla b/docs/spec/tla/client/Chain.tla deleted file mode 100644 index 765c9b2450..0000000000 --- a/docs/spec/tla/client/Chain.tla +++ /dev/null @@ -1,137 +0,0 @@ ------------------------------ MODULE Chain ------------------------------ - -(*************************************************************************** - This module models the behavior of a chain running the IBC Core Client - Protocol. 
-****************************************************************************) - -EXTENDS Integers, FiniteSets, ICS02ClientHandlers, ICS02Definitions - -CONSTANTS - MaxHeight, \* maximal chain height - ChainID, \* chain identifier - NrClients, \* number of clients that will be created on the chain - ClientIDs \* a set of counterparty client IDs - -VARIABLES - chainStore, \* chain store, containing a client state for each client - incomingDatagrams, \* set of incoming datagrams - history \* history variable - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -\* @type: (CHAINSTORE, Str) => Int; -GetClientNr(store, clientID) == - IF \E clientNr \in DOMAIN chainStore.clientStates : - store.clientStates[clientNr].clientID = clientID - THEN CHOOSE clientNr \in DOMAIN store.clientStates : - store.clientStates[clientNr].clientID = clientID - ELSE 0 - -(*************************************************************************** - Client update operators - ***************************************************************************) -\* Update the clients on chain with chainID, -\* using the client datagrams generated by the relayer -\* (Handler operators defined in ClientHandlers.tla) -LightClientUpdate(chainID, store, clientID, datagrams) == - \* create client - LET clientCreatedStore == HandleCreateClient(store, clientID, datagrams) IN - \* update client - LET clientUpdatedStore == HandleClientUpdate(clientCreatedStore, clientID, datagrams, MaxHeight) IN - - clientUpdatedStore - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - -\* Handle the datagrams and update the chain state 
-HandleIncomingDatagrams == - /\ incomingDatagrams /= {} - /\ \E clientID \in ClientIDs : - /\ chainStore' = LightClientUpdate(ChainID, chainStore, clientID, incomingDatagrams) - /\ history' = [history EXCEPT ![clientID] = - LET clientNr == GetClientNr(chainStore', clientID) IN - IF /\ clientNr /= 0 - /\ ~history[clientID].created - /\ chainStore.clientStates[clientNr].clientID = nullClientID - /\ chainStore'.clientStates[clientNr].clientID /= nullClientID - THEN [created |-> TRUE, updated |-> history[clientID].updated] - ELSE IF /\ clientNr /= 0 - /\ history[clientID].created - /\ chainStore.clientStates[clientNr].heights /= chainStore'.clientStates[clientNr].heights - /\ chainStore.clientStates[clientNr].heights \subseteq chainStore'.clientStates[clientNr].heights - THEN [created |-> history[clientID].created, updated |-> TRUE] - ELSE history[clientID] - ] - /\ incomingDatagrams' = {dgr \in incomingDatagrams : dgr.clientID /= clientID} - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - each chain is initialized to InitChain (defined in RelayerDefinitions.tla) -\* - pendingDatagrams for each chain is empty -\* - the packetSeq is set to 1 -Init == - /\ chainStore = ICS02InitChainStore(NrClients, ClientIDs) - /\ incomingDatagrams = {} - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet if the appPacketSeq is not bigger than MaxPacketSeq -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandleIncomingDatagrams - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(AdvanceChain) - /\ WF_vars(HandleIncomingDatagrams) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* 
ChainStores and Datagrams are defined in RelayerDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(NrClients, ClientIDs, MaxHeight) - /\ incomingDatagrams \in SUBSET Datagrams(ClientIDs, MaxHeight) - -\* two clients with the same ID cannot be created -CreatedClientsHaveDifferentIDs == - (\A clientNr \in 1..NrClients : - chainStore.clientStates[clientNr].clientID /= nullClientID) - => (\A clientNr1 \in 1..NrClients : \A clientNr2 \in 1..NrClients : - clientNr1 /= clientNr2 - => chainStore.clientStates[clientNr1].clientID /= - chainStore.clientStates[clientNr2].clientID) - -\* only created clients can be updated -UpdatedClientsAreCreated == - \A clID \in ClientIDs : - history[clID].updated => history[clID].created - -(*************************************************************************** - Properties - ***************************************************************************) -\* it ALWAYS holds that the height of the chain does not EVENTUALLY decrease -HeightDoesntDecrease == - [](\A h \in Heights : chainStore.height = h - => <>(chainStore.height >= h)) - -============================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:17:59 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02ClientHandlers.tla b/docs/spec/tla/client/ICS02ClientHandlers.tla deleted file mode 100644 index 2e31ed2df5..0000000000 --- a/docs/spec/tla/client/ICS02ClientHandlers.tla +++ /dev/null @@ -1,97 +0,0 @@ ------------------------ MODULE ICS02ClientHandlers ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - client create and update datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, ICS02Definitions - -(*************************************************************************** - Client datagram handlers - ***************************************************************************) - -\* Handle "CreateClient" datagrams -\* @type: (CHAINSTORE, Str, Set(DATAGRAM)) => CHAINSTORE; -HandleCreateClient(chain, clientID, datagrams) == - \* get "CreateClient" datagrams with valid clientID - LET createClientDgrs == {dgr \in datagrams : - /\ dgr.type = "CreateClient" - /\ dgr.clientID = clientID} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET createClientHeights == {dgr.height : dgr \in createClientDgrs} IN - \* get next available client number where a client can be created - LET nextClientNr == - IF /\ \A clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID /= clientID - /\ \E clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = nullClientID - THEN CHOOSE clientNr \in DOMAIN chain.clientStates : - \/ /\ clientNr = 1 - /\ chain.clientStates[clientNr].clientID = nullClientID - \/ /\ clientNr - 1 \in DOMAIN chain.clientStates - /\ chain.clientStates[clientNr - 1].clientID /= nullClientID - /\ chain.clientStates[clientNr].clientID = nullClientID - ELSE 0 IN - - \* new chain record with client created - LET clientCreateChain == - IF nextClientNr \in DOMAIN chain.clientStates - THEN [chain EXCEPT !.clientStates = - [chain.clientStates EXCEPT ![nextClientNr] = - \* if the slot at nextClientNr is an empty slot - IF /\ chain.clientStates[nextClientNr].clientID = nullClientID - \* if the set of heights from datagrams is not empty - /\ createClientHeights /= {} - \* then create a client with clientID at the slot nextClientNr - THEN [clientID |-> clientID, - heights |-> {Max(createClientHeights)}] - \* otherwise, discard CreateClient datagrams - ELSE 
chain.clientStates[nextClientNr] - ]] - ELSE chain IN - - clientCreateChain - -\* Handle "ClientUpdate" datagrams -\* @type: (CHAINSTORE, Str, Set(DATAGRAM), Int) => CHAINSTORE; -HandleClientUpdate(chain, clientID, datagrams, MaxHeight) == - \* get the client number of the client with clientID - LET clientNr == IF \E clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = clientID - THEN CHOOSE clientNr \in DOMAIN chain.clientStates : - chain.clientStates[clientNr].clientID = clientID - ELSE 0 IN - \* max client height of client ID - LET maxClientHeight == IF clientNr /= 0 - THEN Max(chain.clientStates[clientNr].heights) - ELSE MaxHeight IN - \* get "ClientUpdate" datagrams with valid clientID - LET updateClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientUpdate" - /\ dgr.clientID = clientID - /\ maxClientHeight < dgr.height} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET updateClientHeights == {dgr.height : dgr \in updateClientDgrs} IN - - \* new chain record with client updated - LET clientUpdatedChain == - IF clientNr \in DOMAIN chain.clientStates - THEN [chain EXCEPT !.clientStates = - [chain.clientStates EXCEPT ![clientNr] = - \* if clientNr is a valid client number - IF /\ clientNr \in DOMAIN chain.clientStates - \* if the slot at clientNr holds a client with clientID - /\ chain.clientStates[clientNr].clientID = clientID - THEN [chain.clientStates[clientNr] EXCEPT !.heights = - chain.clientStates[clientNr].heights \union updateClientHeights] - ELSE chain.clientStates[clientNr] - ]] - ELSE chain IN - - clientUpdatedChain - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 18:46:39 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:42:47 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02Definitions.tla b/docs/spec/tla/client/ICS02Definitions.tla deleted file mode 100644 index 
d7b367166a..0000000000 --- a/docs/spec/tla/client/ICS02Definitions.tla +++ /dev/null @@ -1,120 +0,0 @@ --------------------------- MODULE ICS02Definitions -------------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules, and which are relevant for ICS02. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CLIENTSTATE = - [ - clientID: Str, - heights: Set(Int) - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - clientStates: Int -> CLIENTSTATE - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - clientID: Str, - height: Int - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} - -nullHeight == 0 -nullClientID == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -BoundedSeq(S, bound) == UNION {[1..n -> S] : n \in 1..bound} - -SetHeights(h1, h2) == {h \in 1..10 : h1 <= h /\ h <= h2} - -(****************************** ClientStates ******************************* - A client state is a set of heights - ***************************************************************************) -ClientStates(ClientIDs, maxHeight) == - [ - clientID : ClientIDs, - heights : SUBSET(1..maxHeight) - ] - -NullClientState == - [ - clientID |-> nullClientID, - heights |-> {} - ] - -(******************************** ChainStores ****************************** - A set of chain store records, with fields relevant for ICS02. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. 
- - ***************************************************************************) -ChainStores(NrClients, ClientIDs, maxHeight) == - [ - height : 1..maxHeight, - clientStates : [1..NrClients -> ClientStates(ClientIDs, maxHeight) \union {NullClientState}] - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(ClientIDs, maxHeight) == - [type : {"CreateClient"}, clientID : ClientIDs, height : 1..maxHeight] - \union - [type : {"ClientUpdate"}, clientID : ClientIDs, height : 1..maxHeight] - -\* Set of client datagrams for a specific set ClientIDs of client IDs. -ClientDatagrams(ClientIDs, Heights) == - [type : {"CreateClient"}, clientID : ClientIDs, height : Heights] - \union - [type : {"ClientUpdate"}, clientID : ClientIDs, height : Heights] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(*************************************************************************** - Initial value of a chain store for ICS02 - ***************************************************************************) -\* Initial value of the chain store for ICS02: -\* - height is initialized to 1 -\* - the counterparty clients are uninitialized -ICS02InitChainStore(NrClients, ClientIDs) == - [ - height |-> 1, - clientStates |-> [clientNr \in 1..NrClients |-> NullClientState] - ] - -(*************************************************************************** - Client helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the latest height of chainID -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -========================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:17:55 CEST 2021 by ilinastoilkovska -\* Created Tue Oct 06 16:26:25 CEST 2020 by 
ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02SingleChainEnvironment.tla b/docs/spec/tla/client/ICS02SingleChainEnvironment.tla deleted file mode 100644 index 41a58038ee..0000000000 --- a/docs/spec/tla/client/ICS02SingleChainEnvironment.tla +++ /dev/null @@ -1,107 +0,0 @@ --------------------- MODULE ICS02SingleChainEnvironment -------------------- - -(*************************************************************************** - A TLA+ specification of the IBC client protocol (ICS02). This module models - a system consisting of one chain that can create multiple clients, and which - operates in an environment that overapproximates the behavior of a correct - relayer. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, ICS02Definitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - NrClientsChainA, \* number of clients that will be created on the chain - \* @type: Set(Str); - ClientIDsChainA \* a set of counterparty client IDs for the chain - -ASSUME MaxHeight < 10 - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -vars == <> - -(*************************************************************************** - Instances of ICS02Chain - ***************************************************************************) - -\* We suppose there is a single chain, ChainA -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NrClients <- NrClientsChainA, - ClientIDs <- ClientIDsChainA, - chainStore <- chainAstore, - incomingDatagrams <- datagramsChainA - -(*************************************************************************** - ICS02Environment actions - 
***************************************************************************) - -\* non-deterministically create datagrams -CreateDatagrams == - \* pick a sequence from the set of client datagrams non-deterministically - /\ datagramsChainA' \in - SUBSET ClientDatagrams( - ClientIDsChainA, - SetHeights(1, MaxHeight) - ) - - /\ UNCHANGED <> - - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: the chain takes a step -ChainAction == - ChainA!Next - -\* EnvironmentAction: non-deterministically create datagrams -EnvironmentAction == - CreateDatagrams - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ history = [clientID \in ClientIDsChainA |-> [created |-> FALSE, updated |-> FALSE]] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Specification formula -Spec == Init /\ [][Next]_vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ ChainA!TypeOK - /\ history \in [ClientIDsChainA -> [created : BOOLEAN, updated : BOOLEAN]] - -\* conjunction of invariants -ICS02SingleChainInv == - /\ ChainA!CreatedClientsHaveDifferentIDs - /\ ChainA!UpdatedClientsAreCreated - -============================================================================= -\* Modification History -\* Last modified Thu Apr 15 12:16:46 CEST 2021 by ilinastoilkovska -\* Created Fri Oct 02 12:57:19 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla b/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla deleted file mode 100644 index 
f8ed9f67b1..0000000000 --- a/docs/spec/tla/client/ICS02TwoChainsEnvironment.tla +++ /dev/null @@ -1,167 +0,0 @@ ----------------------- MODULE ICS02TwoChainsEnvironment ---------------------- - -(*************************************************************************** - A TLA+ specification of the IBC client protocol (ICS02). This module models - a system consisting of two chain that can create multiple clients, and which - operate in an environment that overapproximates the behavior of a correct - relayer. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, ICS02Definitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - NrClientsChainA, \* number of clients that will be created on ChainA - \* @type: Int; - NrClientsChainB, \* number of clients that will be created on ChainB - \* @type: Set(Str); - ClientIDsChainA, \* a set of counterparty client IDs for ChainA - \* @type: Set(Str); - ClientIDsChainB \* a set of counterparty client IDs for ChainB - -ASSUME MaxHeight < 10 -ASSUME ClientIDsChainA \intersect ClientIDsChainB = {} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - datagramsChainB, \* set of datagrams incoming to ChainB - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -chainAvars == <> -chainBvars == <> -vars == <> - -(*************************************************************************** - Instances of ICS02Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NrClients <- NrClientsChainA, - 
ClientIDs <- ClientIDsChainA, - chainStore <- chainAstore, - incomingDatagrams <- datagramsChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - NrClients <- NrClientsChainB, - ClientIDs <- ClientIDsChainB, - chainStore <- chainBstore, - incomingDatagrams <- datagramsChainB - -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -GetNrClientsByID(chainID) == - IF chainID = "chainA" - THEN NrClientsChainA - ELSE NrClientsChainB - - -(*************************************************************************** - ICS02Environment actions - ***************************************************************************) - -\* non-deterministically create datagrams -CreateDatagrams == - \* pick a sequence from the set of client datagrams non-deterministically - \* for each chain - /\ datagramsChainA = {} - /\ datagramsChainB = {} - /\ datagramsChainA' \in - SUBSET ClientDatagrams( - ClientIDsChainA, - SetHeights(1, GetLatestHeight(GetChainByID("chainB"))) - ) - /\ datagramsChainB' \in - SUBSET ClientDatagrams( - ClientIDsChainB, - SetHeights(1, GetLatestHeight(GetChainByID("chainA"))) - ) - - /\ UNCHANGED <> - /\ UNCHANGED history - - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchanged -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - -\* EnvironmentAction: non-deterministically create datagrams -EnvironmentAction == - CreateDatagrams - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - /\ history = [clientID \in (ClientIDsChainA 
\union ClientIDsChainB) |-> - [created |-> FALSE, updated |-> FALSE]] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Specification formula -Spec == Init /\ [][Next]_vars - -(*************************************************************************** -Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ ChainA!TypeOK - /\ ChainB!TypeOK - /\ history \in [ClientIDsChainA -> [created : BOOLEAN, updated : BOOLEAN]] - -\* the maximum client height is less than or equal to the current height of -\* the counterparty chain -ClientHeightsAreBelowCounterpartyHeight == - \A chainID \in ChainIDs : - \A clientNr \in 1..GetNrClientsByID(chainID) : - (GetChainByID(chainID).clientStates[clientNr].heights /= {} - => (Max(GetChainByID(chainID).clientStates[clientNr].heights) - <= GetLatestHeight(GetChainByID(GetCounterpartyChainID(chainID))))) - -\* conjunction of invariants -ICS02TwoChainsInv == - /\ ChainA!CreatedClientsHaveDifferentIDs - /\ ChainA!UpdatedClientsAreCreated - /\ ChainB!CreatedClientsHaveDifferentIDs - /\ ChainB!UpdatedClientsAreCreated - /\ ClientHeightsAreBelowCounterpartyHeight - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 19:08:27 CEST 2021 by ilinastoilkovska -\* Created Fri Oct 02 12:57:19 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/client/MC_SingleChain.tla b/docs/spec/tla/client/MC_SingleChain.tla deleted file mode 100644 index 68dd22c294..0000000000 --- a/docs/spec/tla/client/MC_SingleChain.tla +++ /dev/null @@ -1,17 +0,0 @@ ---------------------------- MODULE MC_SingleChain --------------------------- - -MaxHeight == 4 -NrClientsChainA == 2 -ClientIDsChainA == {"B1", "B2"} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* 
@type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -INSTANCE ICS02SingleChainEnvironment - -============================================================================= diff --git a/docs/spec/tla/client/MC_TwoChains.tla b/docs/spec/tla/client/MC_TwoChains.tla deleted file mode 100644 index 7bbf436552..0000000000 --- a/docs/spec/tla/client/MC_TwoChains.tla +++ /dev/null @@ -1,22 +0,0 @@ ----------------------------- MODULE MC_TwoChains ---------------------------- - -MaxHeight == 4 -NrClientsChainA == 2 -NrClientsChainB == 2 -ClientIDsChainA == {"B1", "B2"} -ClientIDsChainB == {"A1", "A2"} - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Set(DATAGRAM); - datagramsChainA, \* set of datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - datagramsChainB, \* set of datagrams incoming to ChainB - \* @type: Str -> [created: Bool, updated: Bool]; - history \* history variable - -INSTANCE ICS02TwoChainsEnvironment -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/client/README.md b/docs/spec/tla/client/README.md deleted file mode 100644 index fbfbbbbece..0000000000 --- a/docs/spec/tla/client/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# TLA+ specification of the IBC Core Client Protocol - -This document describes the TLA+ models of the core logic of the English specification -[ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics). -We start by discussing [the model of the -protocol](#the-model-of-the-protocol). -Then, we discuss the [invariants](#invariants) that we formalize, and finally, we -discuss how to [use the model](#using-the-model). - -## The Model of the Protocol - -We present models two of two different systems, which are used to check -different invariants: -1. 
The first system, specified in [ICS02SingleChainEnvironment.tla](ICS02SingleChainEnvironment.tla), consists of a single chain that can -create multiple clients. -The chain operates in an environment that overapproximates the -behavior of a correct relayer. -2. The second system, specified in [ICS02TwoChainsEnvironment.tla](ICS02TwoChainsEnvironment.tla), consists of two chain that can -create multiple clients. -The relayer is again overapproximated using an environment. - -Both systems extend the following modules: -- [Chain.tla](Chain.tla), which models the behavior of a chain running the IBC Core Client Protocol. -- [ICS02ClientHandlers.tla](ICS02ClientHandlers.tla), which contains definitions of -operators that are used to handle client creation and client update events. -- [ICS02Definitions.tla](ICS02Definitions.tla), which contains definitions of operators that are shared between the - different modules, and which are relevant for ICS02. - -## Invariants - -The module [Chain.tla](Chain.tla) defines the following invariants: -- `TypeOK`, the type invariant, -- `CreatedClientsHaveDifferentIDs`, which ensures that two clients two clients with the same ID cannot be created, -- `UpdatedClientsAreCreated`, which ensures that only created clients can be updated. - -These invariants are checked for a system of single chain in [ICS02SingleChainEnvironment.tla](ICS02SingleChainEnvironment.tla), and for a system of two chains in [ICS02TwoChainsEnvironment.tla](ICS02TwoChainsEnvironment.tla). -Additionally, [ICS02SingleChainEnvironment](ICS02TwoChainsEnvironment.tla) checks the invariant: -- `ClientHeightsAreBelowCounterpartyHeight`, which ensures that the maximum client -height is less than or equal to the current height of the counterparty chain. 
- - -## Using the Model - -### Constants - -The modules `ICS02SingleChainEnvironment.tla` and `ICS02TwoChainsEnvironment.tla` -are parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `NrClientsChainA`, a number of clients that will be created on ChainA - - `NrClientsChainB`, a number of clients that will be created on ChainB - - `ClientIDsChainA`, a set of counterparty client IDs for ChainA - - `ClientIDsChainB`, a set of counterparty client IDs for ChainB - -We assume that the sets `ClientIDsChainA` and `ClientIDsChainB` contain distinct -client IDs. - - -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `ICS02SingleChainEnvironment.tla` (or `ICS02TwoChainsEnvironment.tla`) - - create a model - - assign a value to the constants (example values can be found in `ICS02SingleChainEnvironment.cfg` (or `ICS02TwoChainsEnvironment.tla`)) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - choose invariants/properties that should be checked - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC on `ICS02SingleChainEnvironment.tla` using the constants defined -in `ICS02SingleChainEnvironment.cfg`. -We were able to check the invariants described above within seconds. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. 
- diff --git a/docs/spec/tla/fungible-token-transfer/Bank.tla b/docs/spec/tla/fungible-token-transfer/Bank.tla deleted file mode 100644 index 20e1af41ff..0000000000 --- a/docs/spec/tla/fungible-token-transfer/Bank.tla +++ /dev/null @@ -1,115 +0,0 @@ --------------------------------- MODULE Bank -------------------------------- - -(*************************************************************************** - This module contains definitions of operators that model the behavior of - the bank module. - ***************************************************************************) - -EXTENDS Integers, FiniteSets - -\* subtract coins from account -\* @type: (ACCOUNT -> Int, ACCOUNT, Int) => ACCOUNT -> Int; -SubtractCoins(accounts, accountID, amount) == - [accounts EXCEPT ![accountID] = accounts[accountID] - amount] - -\* add coins to account -\* @type: (ACCOUNT -> Int, ACCOUNT, Int) => ACCOUNT -> Int; -AddCoins(accounts, accountID, amount) == - LET newDomain == (DOMAIN accounts) \union {accountID} IN - - \* if an account with accountID exists - IF accountID \in DOMAIN accounts - \* add amount to account - THEN [accounts EXCEPT ![accountID] = accounts[accountID] + amount] - \* otherwise create a new account with balance equal to amount - \* and add it to the map - ELSE [accID \in newDomain |-> - IF accID = accountID - THEN amount - ELSE accounts[accID] - ] - - -\* Transfer coins from senderAccounts to receiverAccounts, depeding on -\* the sender addressees, receiver addressees and denomination -\* - senderAccounts is a map from sender addresses and denominations -\* to account balances -\* - receiverAccounts is a map from receiver addresses and denominations -\* to account balances -(* @type: (ACCOUNT -> Int, Str, ACCOUNT -> Int, Str, Seq(Str), Int) => - [senderAccounts: ACCOUNT -> Int, receiverAccounts: ACCOUNT -> Int, error: Bool]; -*) -TransferCoins(senderAccounts, senderAddr, - receiverAccounts, receiverAddr, - denomination, amount) == - LET senderAccountID == <> IN - 
LET receiverAccountID == <> IN - - LET senderBalance == senderAccounts[senderAccountID] IN - - \* if the sender account exists and its balance is sufficient - IF /\ senderAccountID \in DOMAIN senderAccounts - /\ senderBalance - amount >= 0 - \* subtract coins from senderAccountID and add coins to receiverAccountID - THEN [ - senderAccounts |-> SubtractCoins(senderAccounts, senderAccountID, amount), - receiverAccounts |-> AddCoins(receiverAccounts, receiverAccountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - senderAccounts |-> senderAccounts, - receiverAccounts |-> receiverAccounts, - error |-> TRUE - ] - - -\* Burn coins on accounts, depending on the address and -\* denomination -\* - accounts is a map from addresses and denominations -\* to account balances -\* @type: (ACCOUNT -> Int, Str, Seq(Str), Int) => [accounts: ACCOUNT -> Int, error: Bool]; -BurnCoins(accounts, address, denomination, amount) == - LET accountID == <> IN - LET balance == accounts[accountID] IN - - \* if the account exists and its balance is sufficient - IF /\ accountID \in DOMAIN accounts - /\ balance - amount >= 0 - \* subtract coins from accountID - THEN [ - accounts |-> SubtractCoins(accounts, accountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - accounts |-> accounts, - error |-> TRUE - ] - - -\* Mint new coins of denomination to account with the given address -\* @type: (ACCOUNT -> Int, Str, Seq(Str), Int, Int) => [accounts: ACCOUNT -> Int, error: Bool]; -MintCoins(accounts, address, denomination, amount, maxBalance) == - LET accountID == <> IN - - \* if the new balance does not exceed maxBalance - IF \/ /\ accountID \notin DOMAIN accounts - /\ amount <= maxBalance - \/ /\ accountID \in DOMAIN accounts - /\ accounts[accountID] + amount <= maxBalance - \* add coins to accountID - THEN [ - accounts |-> AddCoins(accounts, accountID, amount), - error |-> FALSE - ] - \* otherwise report an error - ELSE [ - accounts |-> accounts, - 
error |-> TRUE - ] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 14:50:41 CEST 2021 by ilinastoilkovska -\* Created Thu Oct 28 19:49:56 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/Chain.tla b/docs/spec/tla/fungible-token-transfer/Chain.tla deleted file mode 100644 index af779b2999..0000000000 --- a/docs/spec/tla/fungible-token-transfer/Chain.tla +++ /dev/null @@ -1,203 +0,0 @@ -------------------------------- MODULE Chain ------------------------------- - -(*************************************************************************** - This module models the behavior of a chain running the IBC Token Transfer - Protocol. - - The chain state is represented by a chain store, which is a snapshot of the - provable and private stores, to the extent necessary for ICS20. Additionally, - a chain has a dedicated datagram container for packet datagrams (given by a - queue of datagrams that models the order in which the datagrams were submitted - by some relayer). 
-***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCTokenTransferDefinitions, - ICS04PacketHandlers, ICS20FungibleTokenTransferHandlers - -CONSTANTS - MaxHeight, \* maximal chain height - MaxPacketSeq, \* maximal packet sequence number - MaxBalance, \* maximal account balance - ChainID, \* a chain ID - NativeDenomination \* native denomination of tokens at ChainID - - -VARIABLES - chainStore, \* chain store, containing client heights, a channel end - incomingPacketDatagrams, \* sequence of incoming packet datagrams - appPacketSeq, \* packet sequence number from the application on the chain - packetLog, \* packet log - accounts, \* a map from chainIDs and denominations to account balances - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -(*************************************************************************** - Token transfer operators - ***************************************************************************) -\* Create a packet: Abstract away from timestamp. 
-\* Assume timeoutHeight is MaxHeight -CreatePacket(packetData) == - LET channelEnd == chainStore.channelEnd IN - [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - data |-> packetData, - srcPortID |-> channelEnd.portID, - srcChannelID |-> channelEnd.channelID, - dstPortID |-> channelEnd.counterpartyPortID, - dstChannelID |-> channelEnd.counterpartyChannelID - ] - - -\* Update the chain store and packet log with ICS20 packet datagrams -\* @type: (Str, DATAGRAM, Seq(LOGENTRY)) => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -TokenTransferUpdate(chainID, packetDatagram, log) == - LET packet == packetDatagram.packet IN - \* get the new updated store, packet log, and accounts - LET tokenTransferUpdate == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, chainStore, packetDatagram, log, accounts, escrowAccounts, MaxBalance) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainStore, packetDatagram, log, accounts, escrowAccounts, MaxBalance) - ELSE [store |-> chainStore, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - IN - - LET tokenTransferStore == tokenTransferUpdate.store IN - - \* update height - LET updatedStore == - IF tokenTransferStore.height + 1 \in Heights - THEN [tokenTransferStore EXCEPT !.height = tokenTransferStore.height + 1] - ELSE tokenTransferStore - IN - - [store |-> updatedStore, - log |-> tokenTransferUpdate.log, - accounts |-> tokenTransferUpdate.accounts, - escrowAccounts |-> tokenTransferUpdate.escrowAccounts] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - /\ UNCHANGED <> - 
-\* handle the incoming packet datagrams -HandlePacketDatagrams == - \* enabled if incomingPacketDatagrams is not empty - /\ incomingPacketDatagrams /= <<>> - /\ LET tokenTransferUpdate == TokenTransferUpdate(ChainID, Head(incomingPacketDatagrams), packetLog) IN - /\ chainStore' = tokenTransferUpdate.store - /\ packetLog' = tokenTransferUpdate.log - /\ accounts' = tokenTransferUpdate.accounts - /\ escrowAccounts' = tokenTransferUpdate.escrowAccounts - /\ incomingPacketDatagrams' = Tail(incomingPacketDatagrams) - /\ UNCHANGED appPacketSeq - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* Create packet data - /\ LET createOutgoingPacketOutcome == - CreateOutgoingPacketData(accounts, - escrowAccounts, - <>, - MaxBalance, - ChainID, - GetCounterpartyChainID(ChainID)) IN - \* do nothing if there is an error - \/ /\ createOutgoingPacketOutcome.error - /\ UNCHANGED vars - \* if there is no error, send packet - \/ /\ ~createOutgoingPacketOutcome.error - /\ LET packet == CreatePacket(createOutgoingPacketOutcome.packetData) IN - LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN - \* if writing the packet commitment was successful - /\ chainStore /= updatedChainStore - \* update chain store with packet committment - /\ chainStore' = updatedChainStore - \* log sent packet - /\ packetLog' = Append(packetLog, [ - type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight, - data |-> packet.data - ]) - \* update bank accounts - /\ accounts' = createOutgoingPacketOutcome.accounts - \* update escrow accounts - /\ escrowAccounts' = createOutgoingPacketOutcome.escrowAccounts - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED incomingPacketDatagrams - - - -\* Acknowledge a packet -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - \* write acknowledgements to 
chain store - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - \* log acknowledgement - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - /\ UNCHANGED <> - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - the chain store is initialized to -\* ICS20InitChainStore(ChainID, <>) -\* (defined in IBCTokenTransferDefinitions.tla) -\* - incomingPacketDatagrams is an empty sequence -\* - the appPacketSeq is set to 1 -Init == - /\ chainStore = ICS20InitChainStore(ChainID) - /\ incomingPacketDatagrams = <<>> - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(Next) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* ChainStores, Datagrams, PacketLogEntries are defined in IBCTokenTransferDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, {NativeDenomination}) - /\ appPacketSeq \in 1..(MaxPacketSeq + 1) - - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:33:11 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:01:03 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg b/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg deleted file mode 100644 index aa0fe31d8d..0000000000 --- 
a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.cfg +++ /dev/null @@ -1,16 +0,0 @@ -CONSTANTS - MaxHeight = 5 - MaxPacketSeq = 5 - MaxBalance = 5 - NativeDenominationChainA = "atom" - NativeDenominationChainB = "eth" - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - ICS20Inv - -PROPERTIES - ICS20Prop \ No newline at end of file diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla b/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla deleted file mode 100644 index ef1df144f5..0000000000 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransfer.tla +++ /dev/null @@ -1,440 +0,0 @@ -------------------------- MODULE IBCTokenTransfer ------------------------- - -(*************************************************************************** - A TLA+ specification of the IBC Fungible Token Transfer Protocol (ICS20). - This module is the main module in the specification and models a - system of two chains, where each chain perofmrs a transaction that sends - 1 token to the respective counterparty. - - The specification also contains type annotations for the model checker - Apalache. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCTokenTransferDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Int; - MaxBalance, \* maximal account balance - \* @type: Str; - NativeDenominationChainA, \* native denomination of tokens at ChainA - \* @type: Str; - NativeDenominationChainB \* native denomination of tokens at ChainA - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: ACCOUNT -> Int; - accounts, \* a map from chainIDs and denominations to account balances - \* @type: ACCOUNT -> Int; - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - -chainAvars == <> -chainBvars == <> -vars == <> - -Heights == 1..MaxHeight -NativeDenominations == {NativeDenominationChainA, NativeDenominationChainB} -AllDenominations == Seq(ChannelIDs \union PortIDs \union NativeDenominations) - -(*************************************************************************** - Instances of ICS20Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - NativeDenomination <- NativeDenominationChainA, - chainStore <- 
chainAstore, - incomingPacketDatagrams <- packetDatagramsChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - NativeDenomination <- NativeDenominationChainB, - chainStore <- chainBstore, - incomingPacketDatagrams <- packetDatagramsChainB, - appPacketSeq <- appPacketSeqChainB - - (*************************************************************************** - Environment operators - ***************************************************************************) - -\* get chain store by ID -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* update the client height of the client for the counterparty chain of chainID -UpdateClientHeights(chainID) == - - /\ \/ /\ chainID = "chainA" - /\ chainAstore' = [chainAstore EXCEPT - !.counterpartyClientHeights = - chainAstore.counterpartyClientHeights - \union - {chainBstore.height} - ] - /\ UNCHANGED chainBstore - \/ /\ chainID = "chainB" - /\ chainBstore' = [chainBstore EXCEPT - !.counterpartyClientHeights = - chainBstore.counterpartyClientHeights - \union - {chainAstore.height} - ] - /\ UNCHANGED chainAstore - /\ UNCHANGED <> - /\ UNCHANGED <> - - -\* Compute a packet datagram designated for dstChainID, based on the packetLogEntry -\* @type: (Str, Str, LOGENTRY) => DATAGRAM; -PacketDatagram(srcChainID, dstChainID, packetLogEntry) == - - LET srcChannelID == GetChannelID(srcChainID) IN \* "chanAtoB" (if srcChainID = "chainA") - LET dstChannelID == GetChannelID(dstChainID) IN \* "chanBtoA" (if dstChainID = "chainB") - - LET srcPortID == GetPortID(srcChainID) IN \* "portA" (if srcChainID = "chainA") - LET dstPortID == GetPortID(dstChainID) IN \* "portB" (if dstChainID = "chainB") - - LET srcHeight == GetLatestHeight(GetChainByID(srcChainID)) IN - - \* the source chain of the packet that is received by dstChainID is srcChainID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> 
packetLogEntry.timeoutHeight, - srcChannelID |-> srcChannelID, - srcPortID |-> srcPortID, - dstChannelID |-> dstChannelID, - dstPortID |-> dstPortID, - data |-> packetLogEntry.data - ] IN - - \* the source chain of the packet that is acknowledged by srcChainID is dstChainID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> dstChannelID, - srcPortID |-> dstPortID, - dstChannelID |-> srcChannelID, - dstPortID |-> srcPortID, - data |-> packetLogEntry.data - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -(*************************************************************************** - Environment actions - ***************************************************************************) - \* update the client height of some chain - UpdateClients == - \E chainID \in ChainIDs : UpdateClientHeights(chainID) - -\* create datagrams depending on packet log -CreateDatagrams == - /\ packetLog /= <<>> - /\ LET packetLogEntry == Head(packetLog) IN - LET srcChainID == packetLogEntry.srcChainID IN - LET dstChainID == GetCounterpartyChainID(srcChainID) IN - LET packetDatagram == PacketDatagram(srcChainID, dstChainID, packetLogEntry) IN - /\ \/ /\ packetDatagram = NullDatagram - /\ UNCHANGED <> - \/ /\ packetDatagram /= NullDatagram - /\ srcChainID = "chainA" - /\ packetDatagramsChainB' = - Append(packetDatagramsChainB, packetDatagram) - /\ UNCHANGED packetDatagramsChainA - \/ /\ packetDatagram /= NullDatagram - /\ srcChainID = "chainB" - /\ packetDatagramsChainA' = - Append(packetDatagramsChainA, - PacketDatagram(srcChainID, dstChainID, packetLogEntry)) - /\ UNCHANGED packetDatagramsChainB - - /\ packetLog' = 
Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchange -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - -\* EnvironmentAction: create packet datagrams if packet log is not empty -EnvironmentAction == - \/ CreateDatagrams - \/ UpdateClients - -(*************************************************************************** - Specification - ***************************************************************************) - -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - \* bank accounts for each chain ID and its native denomination have MaxBalance - /\ accounts = - [<> \in {<<"chainA", <>>>, - <<"chainB", <>>>} - |-> MaxBalance] - \* escrow accounts for each channel ID and the chain's native denomination have balance 0 - /\ escrowAccounts = - [<> \in {<<"chanBtoA", <>>>, - <<"chanAtoB", <>>>} - |-> 0] - /\ packetLog = <<>> - - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* fairness constraint -Fairness == - /\ ChainA!Fairness - /\ ChainB!Fairness - /\ WF_vars(Next) - -Spec == Init /\ [][Next]_vars /\ Fairness - -(*************************************************************************** - Helper operators used in properties and invariants - ***************************************************************************) - -RECURSIVE Sum(_) - -\* sum of elements in a set -Sum(S) == - IF S = {} - THEN 0 - ELSE LET x == CHOOSE y \in S: TRUE IN - x + Sum(S \ {x}) - -\* get the native denomination based on chainID -GetNativeDenomination(chainID) == - IF chainID = "chainA" - THEN NativeDenominationChainA - ELSE NativeDenominationChainB - -\* set of prefixed denominations 
given a native denomination -\* @type: (Str) => Set(Seq(Str)); -PrefixedDenoms(nativeDenomination) == - {<> : portID \in PortIDs, channelID \in ChannelIDs} - -\* set of escrow account IDs -\* @type: Set(<>); -EscrowAccountsDomain == - {<>>> : - chainID \in ChainIDs} - -\* set of all denominations -Denominations == - {<>, <>} - \union - PrefixedDenoms(NativeDenominationChainA) - \union - PrefixedDenoms(NativeDenominationChainB) - -\* create expected packet receipt for a given packet commitment -\* @type: (Str, PACKETCOMM) => [channelID: Str, portID: Str, sequence: Int]; -PacketReceipt(chainID, packetCommitment) == - [ - channelID |-> GetCounterpartyChannelID(chainID), - portID |-> GetCounterpartyPortID(chainID), - sequence |-> packetCommitment.sequence - ] - -\* get the escrow account IDs for the native denomination -\* @type: (Str) => Set(<>); -EscrowAccountIDs(nativeDenomination) == - {<>>> : channelID \in ChannelIDs} - -\* a packet is in flight if a packet commitment exists, but a -\* corresponding packet receipt is not on the counterparty chain -\* @type: (Str, Str) => Set(Int); -GetAmountsInFlight(chainID, nativeDenom) == - - \* get packet commitments of chainID and packet receipts of its counterparty - LET packetCommittments == GetChainByID(chainID).packetCommitments IN - LET counterpartyChainID == GetCounterpartyChainID(chainID) IN - LET counterpartyPacketReceipts == GetChainByID(counterpartyChainID).packetReceipts IN - - \* get packet commitments for packets in flight - LET inFlight == {pc \in packetCommittments : - PacketReceipt(chainID, pc) \notin counterpartyPacketReceipts} IN - - \* get packet data for packets in flight - LET inFlightData == {pc.data : pc \in inFlight} IN - - \* get packet data for packets in flight that have a prefixed denomination, - \* where the last field is the native denomination of chainID - LET inFlightDataOfDenomination == {d \in inFlightData : - d.denomination[Len(d.denomination)] = nativeDenom} IN - - \* compute set of amounts 
of the packets in flight that have - \* the desired denomination - {d.amount : d \in inFlightDataOfDenomination} - -\* compute sum over accounts that have chainID's native denomination -SumOverLocalAccounts(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - \* get counterparty channel ID - LET counterpartyChannelID == GetCounterpartyChannelID(chainID) IN - - \* compute the sum over bank accounts and escrow accounts with - \* native denomination - accounts[<>>>] + - escrowAccounts[<>>>] - -\* compute the sum over the amounts in escrow accounts -\* @type: (Str) => Int; -SumOverEscrowAccounts(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* get the escrow account IDs for the native denomination - LET escrowAccountIDs == EscrowAccountIDs(nativeDenomination) IN - \* get the amounts in escrow accounts for the native denomination - LET escrowAccountAmounts == {escrowAccounts[accountID] : - accountID \in (escrowAccountIDs \intersect DOMAIN escrowAccounts)} IN - - \* compute the sum over the amounts in escrow accounts - Sum(escrowAccountAmounts) - -\* compute the sum over the amounts of packets in flight -SumOverPacketsInFlight(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* get the set of amounts of packets in flight for each chain - LET amountsInFlight == UNION {GetAmountsInFlight(chID, nativeDenomination) : chID \in ChainIDs} IN - - \* compute the sum over the amounts of packets in flight - Sum(amountsInFlight) - -\* compute the sum over the amounts in bank accounts for prefixed denomination -SumOverBankAccountsWithPrefixedDenoms(chainID) == - \* get the native denomination of chainID - LET nativeDenomination == GetNativeDenomination(chainID) IN - - \* compute the set of prefixed denominations - LET prefixedDenominations == {pd \in 
PrefixedDenoms(nativeDenomination) : - /\ Len(pd) > 1 - /\ pd[Len(pd)] = nativeDenomination} IN - - \* get the bank account IDs for the prefixed denominations - LET accountIDs == {<> : - chID \in ChainIDs, prefixedDenomination \in prefixedDenominations} IN - - \* get the amounts in bank accounts for the prefixed denominations - LET accountAmounts == {accounts[accountID] : - accountID \in (accountIDs \intersect DOMAIN accounts)} IN - - \* compute the sum over the amounts in bank accounts - Sum(accountAmounts) - -(*************************************************************************** - Properties and invariants - ***************************************************************************) - -\* Type invariant -TypeOK == - /\ chainAstore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, NativeDenominations) - /\ chainBstore \in ChainStores(Heights, MaxPacketSeq, MaxBalance, NativeDenominations) - /\ appPacketSeqChainA \in 1..(MaxPacketSeq + 1) - /\ appPacketSeqChainB \in 1..(MaxPacketSeq + 1) - /\ packetDatagramsChainA \in Seq(Datagrams(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ packetDatagramsChainB \in Seq(Datagrams(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq, MaxBalance, NativeDenominations)) - /\ DOMAIN accounts \subseteq ChainIDs \X AllDenominations - /\ \A accountID \in DOMAIN accounts : accounts[accountID] \in 0..MaxBalance - /\ DOMAIN escrowAccounts \subseteq EscrowAccountsDomain - /\ \A accountID \in DOMAIN escrowAccounts : escrowAccounts[accountID] \in 0..MaxBalance - -\* There are MaxBalance coins of the native denomination in bank and escrow accounts -\* for a given chain -\* Note: this property still holds if the counterparty chain is malicious -PreservationOfTotalSupplyLocal == - \A chainID \in ChainIDs : - SumOverLocalAccounts(chainID) = MaxBalance - -\* The amount in nativeDenomination in escrow accounts -\* is equal to the sum of: -\* * the amounts 
in-flight packets in a (prefixed or unprefixed) denomination ending -\* in nativeDenomination, and -\* * the amounts in accounts in a prefixed denomination ending in -\* nativeDenomination, in which it is not native -\* Note: this property is satisfied only if both chains are correct -PreservationOfTotalSupplyGlobal == - \A chainID \in ChainIDs : - SumOverEscrowAccounts(chainID) = - SumOverPacketsInFlight(chainID) + SumOverBankAccountsWithPrefixedDenoms(chainID) - -\* A violation of this property is an execution where fungibility is preserved, -\* where a return payment is effectuated -\* Note: this property should also be violated if the counterparty chain is malicious -\* and effectuates a return payment -NonPreservationOfFungibility == - \A accountID \in EscrowAccountsDomain : - [](escrowAccounts[accountID] > 0 - => [](escrowAccounts[accountID] > 0)) - -\* ICS20Inv invariant: conjunction of invariants -ICS20Inv == - /\ PreservationOfTotalSupplyLocal - /\ PreservationOfTotalSupplyGlobal - -\* ICS20Prop property: conjunction of properties -ICS20Prop == - NonPreservationOfFungibility - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:24:26 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:00:24 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla b/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla deleted file mode 100644 index 8e3f3c5641..0000000000 --- a/docs/spec/tla/fungible-token-transfer/IBCTokenTransferDefinitions.tla +++ /dev/null @@ -1,426 +0,0 @@ --------------------- MODULE IBCTokenTransferDefinitions -------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules, and which are relevant for ICS20. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - version: Str - ]; -*) -(* @typeAlias: PACKETDATA = - [ - denomination: Seq(Str), - amount: Int, - sender: Str, - receiver: Str - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - data: PACKETDATA, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - data: PACKETDATA, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: ACCOUNT = - <>; -*) -(* @typeAlias: PACKETTOACK = - <>; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - counterpartyClientHeights: Set(Int), - channelEnd: CHAN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKETTOACK), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK), - escrowAccounts: ACCOUNT -> Int - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - height: Int, - proofHeight: Int, - consensusHeight: Int, - clientID: Str, - counterpartyClientID: Str, - connectionID: Str, - counterpartyConnectionID: Str, - versions: Set(Int), - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - packet: PACKET, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool, - data: PACKETDATA - ]; -*) -(* @typeAlias: HISTORY = - [ - connInit: Bool, - connTryOpen: Bool, - connOpen: Bool, - chanInit: 
Bool, - chanTryOpen: Bool, - chanOpen: Bool, - chanClosed: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} -ChannelStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED"} - -nullHeight == 0 -nullChannelID == "none" -nullPortID == "none" -nullEscrowAddress == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED". - - - order -- a string - Stores whether the channel end is ordered or unordered. It has one of the - following values: "UNORDERED", "ORDERED" - - * for ICS20 we require that the channels are unordered - - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. - - - version -- a string - The version is "ics20-1" for fungible token transfer - ***************************************************************************) - -ChannelEnds == - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID}, - version : {"ics20-1"} - ] - -(************************* FungibleTokenPacketData ************************* - A set of records defining ICS20 packet data. 
- - Denominations are defined as Seq(ChannelIDs \union PortIDs \union NativeDenominations), - where NativeDenominations is the set of native denominations of the two chains. - ***************************************************************************) -\* @type: (Int, Set(Seq(Str))) => Set(PACKETDATA); -FungibleTokenPacketData(maxBalance, Denominations) == - [ - denomination : Denominations, - amount : 0..maxBalance, - sender : ChainIDs, - receiver : ChainIDs - ] - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -\* @type: (Set(Int), Int, Int, Set(Seq(Str))) => Set(PACKETCOMM); -PacketCommitments(Heights, maxPacketSeq, maxBalance, Denominations) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - data : FungibleTokenPacketData(maxBalance, Denominations), - timeoutHeight : Heights - ] - -\* Set of packet receipts -\* @type: (Int) => Set(PACKETREC); -PacketReceipts(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -\* @type: (Int) => Set(PACKETACK); -PacketAcknowledgements(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(********************************* Packets *********************************) -\* Set of packets -\* @type: (Set(Int), Int, Int, Set(Seq(Str))) => Set(PACKET); -Packets(Heights, maxPacketSeq, maxBalance, Denominations) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, Denominations), - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records, with fields relevant for ICS20. 
- A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - channelEnd : a channel end - Stores data about the channel with the counterparty chain. - - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. - - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. - - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty - - - packetsToAcknowledge : a sequence of pairs <> - A pair <>, where ack is a Boolean value, is added - to this sequence when a chain successfully receives a PacketRecv - datagram - - A chain store is the combination of the provable and private stores. 
- - ***************************************************************************) -\* @type: (Set(Int), Int, Int, Set(Str)) => Set(CHAINSTORE); -ChainStores(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [ - height : Heights, - counterpartyClientHeights : SUBSET(Heights), - channelEnd : ChannelEnds, - - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations))), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)) - \X - BOOLEAN) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - proofHeight : Heights] - \union - [type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq, maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - acknowledgement : BOOLEAN, - proofHeight : Heights] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -PacketLogEntries(Heights, maxPacketSeq, maxBalance, NativeDenominations) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)) - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 
1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights, - data : FungibleTokenPacketData(maxBalance, - Seq(ChannelIDs \union PortIDs \union NativeDenominations)), - acknowledgement : BOOLEAN - ] - -(*************************************************************************** - Chain helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF chain.counterpartyClientHeights /= {} - THEN Max(chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the channel ID of the channel end at chainID -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -GetCounterpartyChannelID(chainID) == - IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE nullChannelID - -\* get the port ID at chainID -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" - THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the latest height of chain -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -(*************************************************************************** - Initial values of a channel end, chain store, accounts for ICS20 - ***************************************************************************) -\* Initial value of a channel end: -\* - state is "OPEN" (we assume channel handshake has 
successfully finished) -\* - order is "UNORDERED" (requirement of ICS20) -\* - channelID, counterpartyChannelID -InitUnorderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "UNORDERED", - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID), - version |-> "ics20-1" - ] - -\* A set of initial values of the chain store for ICS20: -\* - height is initialized to 1 -\* - counterpartyClientHeights is the set of installed client heights -\* - the channelEnd is initialized to InitUnorderedChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets -\* to acknowledge are empty -ICS20InitChainStore(ChainID) == - [ - height |-> 1, - counterpartyClientHeights |-> {}, - channelEnd |-> InitUnorderedChannelEnd(ChainID), - - packetCommitments |-> {}, - packetReceipts |-> {}, - packetAcknowledgements |-> {}, - packetsToAcknowledge |-> <<>> - ] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:27:35 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:01:38 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla b/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla deleted file mode 100644 index cfcd4883a8..0000000000 --- a/docs/spec/tla/fungible-token-transfer/ICS04PacketHandlers.tla +++ /dev/null @@ -1,359 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -(*************************************************************************** - This module contains definitions of operators that are used to handle - packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCTokenTransferDefinitions, - ICS20FungibleTokenTransferHandlers - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY), ACCOUNT -> Int, ACCOUNT -> Int, Int) - => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -HandlePacketRecv(chainID, chain, packetDatagram, log, accounts, escrowAccounts, maxBalance) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - LET packetRecvUpdates == - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - THEN \* call application function OnPacketRecv - LET OnPacketRecvOutcome == - OnPacketRecv(chain, accounts, escrowAccounts, packet, maxBalance) IN - \* if OnPacketRecv is successful - IF /\ ~OnPacketRecvOutcome.error - \* if the packet has not been received - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN 
- - LET updatedChainStore == [chain EXCEPT - \* record that the packet has been received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement - \* should be written - !.packetsToAcknowledge = - Append(chain.packetsToAcknowledge, - <>) - ] IN - - \* update the chain store, packet log, and bank accounts - [store |-> updatedChainStore, - log |-> Append(log, logEntry), - accounts |-> OnPacketRecvOutcome.accounts, - escrowAccounts |-> OnPacketRecvOutcome.escrowAccounts] - - \* otherwise, do not update the chain store, the log, the accounts - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - IN - - packetRecvUpdates - -\* Handle "PacketAck" datagrams -(* @type: (CHAINSTORE, DATAGRAM, Seq(LOGENTRY), ACCOUNT -> Int, ACCOUNT -> Int, Int) - => [store: CHAINSTORE, log: Seq(LOGENTRY), accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -HandlePacketAck(chain, packetDatagram, log, accounts, escrowAccounts, maxBalance) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - \* get acknowledgement - LET ack == packetDatagram.acknowledgement IN - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* call application function OnPacketAck - LET OnPacketAckOutcome == - OnPaketAck(accounts, escrowAccounts, packet, ack, maxBalance) IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet commitment exists in the chain 
store - /\ packetCommitment \in chain.packetCommitments - \* if the "PacketAck" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] - IN - - [store |-> updatedChainStore, - log |-> log, - accounts |-> OnPacketAckOutcome.accounts, - escrowAccounts |-> OnPacketAckOutcome.escrowAccounts] - - \* otherwise, do not update the chain store, log and accounts - ELSE [store |-> chain, - log |-> log, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - - -\* write packet committments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get latest client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* there exists a counterparty client - \* (used to abstract the check if the connection end is not in UNINIT) - /\ latestClientHeight /= 0 - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments - \union - {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]} - ] - \* otherwise, do not update the chain 
store - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKETTOACK) => CHAINSTORE; -WriteAcknowledgement(chain, packetToAck) == - \* packetToack is a pair of a packet and its acknowledgement - LET packet == packetToAck[1] IN - LET ack == packetToAck[2] IN - - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> ack - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - - \* remove the packet from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - -\* log acknowledgements to packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKETTOACK) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packetToAck) == - \* packetToAck is a pair of a packet and its acknowledgement - LET packet == packetToAck[1] IN - LET ack == packetToAck[2] IN - - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> ack - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == [ - type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> 
packet.timeoutHeight, - acknowledgement |-> ack, - data |-> packet.data - ] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - -\* check if a packet timed out -(* @type: (CHAINSTORE, CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int, Int) - => [store: CHAINSTORE, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -TimeoutPacket(chain, counterpartyChain, accounts, escrowAccounts, - packet, proofHeight, maxBalance) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* call application function OnTimeoutPacket - LET OnTimeoutPacketOutcome == - OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID match channel and port IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - [store |-> 
updatedChainStore, - accounts |-> OnTimeoutPacketOutcome.accounts, - escrowAccounts |-> OnTimeoutPacketOutcome.escrowAccounts] - - \* otherwise, do not update the chain store and accounts - ELSE [store |-> chain, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - -\* check if a packet timed out on close -(* @type: (CHAINSTORE, CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int, Int) - => [store: CHAINSTORE, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -TimeoutOnClose(chain, counterpartyChain, accounts, escrowAccounts, - packet, proofHeight, maxBalance) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - data |-> packet.data, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* call application function OnTimeoutPacket - LET OnTimeoutPacketOutcome == - OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) IN - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPort = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.order = "UNORDERED" - /\ counterpartyChannelEnd.portID = 
packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.version = channelEnd.version - \* counterparty chain has not received the packet - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* remove packet commitment - THEN LET updatedChainStore == - [chain EXCEPT !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - [store |-> updatedChainStore, - accounts |-> OnTimeoutPacketOutcome.accounts, - escrowAccounts |-> OnTimeoutPacketOutcome.escrowAccounts] - - \* otherwise, do not update the chain store and accounts - ELSE [store |-> chain, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts] - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:36:57 CEST 2021 by ilinastoilkovska -\* Created Thu Oct 19 18:29:58 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla b/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla deleted file mode 100644 index 846a3066c6..0000000000 --- a/docs/spec/tla/fungible-token-transfer/ICS20FungibleTokenTransferHandlers.tla +++ /dev/null @@ -1,200 +0,0 @@ ------------------ MODULE ICS20FungibleTokenTransferHandlers ---------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - ICS20 packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, Bank, IBCTokenTransferDefinitions - -\* create outgoing packet data -\* - accounts is the map of bank accounts -\* - escrowAccounts is the map of escrow accounts -\* - sender, receiver are chain IDs (used as addresses) -\* @type: (ACCOUNT -> Int, ACCOUNT -> Int, Seq(Str), Int, Str, Str) => [denomination: Seq(Str), amount: Int, sender: Str, receiver: Str]; -CreateOutgoingPacketData(accounts, escrowAccounts, denomination, amount, sender, receiver) == - \* sending chain is source if the denomination is of length 1 - \* or if the denomination is not prefixed by the sender's port and channel ID - LET source == \/ Len(denomination) = 1 - \/ SubSeq(denomination, 1, 2) /= <> IN - - \* create packet data - LET data == - [ - denomination |-> denomination, - amount |-> amount, - sender |-> sender, - receiver |-> receiver - ] IN - - \* get the outcome of TransferCoins from the sender account to the escrow account - LET transferCoinsOutcome == - TransferCoins(accounts, sender, escrowAccounts, GetCounterpartyChannelID(sender), denomination, amount) IN - - \* get the outcome of BurnCoins applied to the sender account - LET burnCoinsOutcome == - BurnCoins(accounts, sender, denomination, amount) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN [ - packetData |-> data, - accounts |-> transferCoinsOutcome.senderAccounts, - escrowAccounts |-> transferCoinsOutcome.receiverAccounts, - error |-> FALSE - ] - \* if not source and the coin burning is successful, - \* update bank accounts using the outcome from BurnCoins - ELSE IF /\ ~source - /\ ~burnCoinsOutcome.error - THEN [ - packetData |-> data, - accounts |-> burnCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts, - error |-> FALSE - ] - \* otherwise, there is an 
error - ELSE [ - packetData |-> data, - accounts |-> accounts, - escrowAccounts |-> escrowAccounts, - error |-> TRUE - ] - -\* receive an ICS20 packet -(* @type: (CHAINSTORE, ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int) => - [packetAck: Bool, accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int, error: Bool]; -*) -OnPacketRecv(chain, accounts, escrowAccounts, packet, maxBalance) == - \* get packet data and denomination - LET data == packet.data IN - LET denomination == data.denomination IN - - \* receiving chain is source if - \* the denomination is prefixed by srcPortID and srcChannelID - LET source == /\ Len(denomination) > 1 - /\ SubSeq(denomination, 1, 2) = <> IN - - LET unprefixedDenomination == SubSeq(denomination, 3, Len(denomination)) IN - LET prefixedDenomination == <> \o denomination IN - - \* get the outcome of TransferCoins from the escrow - \* to the receiver account - LET transferCoinsOutcome == - TransferCoins( - escrowAccounts, packet.dstChannelID, - accounts, data.receiver, - unprefixedDenomination, data.amount - ) IN - - \* get the outcome of MintCoins with prefixedDenomination - \* to the receiver account - LET mintCoinsOutcome == - MintCoins( - accounts, data.receiver, - prefixedDenomination, data.amount, - maxBalance - ) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN - [ - packetAck |-> TRUE, - accounts |-> transferCoinsOutcome.receiverAccounts, - escrowAccounts |-> transferCoinsOutcome.senderAccounts, - error |-> FALSE - ] - \* if not source and minting coins is successful - \* update bank accounts using the outcome from MintCoins - ELSE IF /\ ~source - /\ ~mintCoinsOutcome.error - THEN [ - packetAck |-> TRUE, - accounts |-> mintCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts, - error |-> FALSE - ] - \* otherwise, there is an error - ELSE [ - packetAck |-> FALSE, - accounts |-> 
accounts, - escrowAccounts |-> escrowAccounts, - error |-> TRUE - ] - -\* refund tokens on unsuccessful ack -(* @type: (ACCOUNT -> Int, ACCOUNT -> Int, PACKET, Int) => - [accounts: ACCOUNT -> Int, escrowAccounts: ACCOUNT -> Int]; -*) -RefundTokens(accounts, escrowAccounts, packet, maxBalance) == - \* get packet data and denomination - LET data == packet.data IN - LET denomination == data.denomination IN - - \* chain is source if the denomination is of length 1 - \* or if the denomination is not prefixed by srcPortID and srcChannelID - LET source == \/ Len(denomination) = 1 - \/ SubSeq(denomination, 1, 2) /= <> IN - - \* get the outcome of TransferCoins from the escrow - \* to the sender account - LET transferCoinsOutcome == - TransferCoins( - escrowAccounts, packet.srcChannelID, - accounts, data.sender, - denomination, data.amount - ) IN - - \* get the outcome of MintCoins with denomination - \* to the sender account - LET mintCoinsOutcome == - MintCoins( - accounts, data.sender, - denomination, data.amount, - maxBalance - ) IN - - IF /\ source - /\ ~transferCoinsOutcome.error - \* if source and the coin transfer is successful, - \* update bank accounts and escrow accounts using the outcome from TransferCoins - THEN [ - accounts |-> transferCoinsOutcome.receiverAccounts, - escrowAccounts |-> transferCoinsOutcome.senderAccounts - ] - \* if not source and minting coins is successful - \* update bank accounts using the outcome from MintCoins - ELSE IF /\ ~source - /\ ~mintCoinsOutcome.error - THEN [ - accounts |-> mintCoinsOutcome.accounts, - escrowAccounts |-> escrowAccounts - ] - \* otherwise, do not update anything - ELSE [ - accounts |-> accounts, - escrowAccounts |-> escrowAccounts - ] - -\* acknowledge an ICS20 packet -OnPaketAck(accounts, escrowAccounts, packet, ack, maxBalance) == - IF ~ack - THEN RefundTokens(accounts, escrowAccounts, packet, maxBalance) - ELSE [ - accounts |-> accounts, - escrowAccounts |-> escrowAccounts - ] - -\* timeout an ICS20 packet 
-OnTimeoutPacket(accounts, escrowAccounts, packet, maxBalance) == - RefundTokens(accounts, escrowAccounts, packet, maxBalance) - -============================================================================= -\* Modification History -\* Last modified Wed Apr 14 15:37:37 CEST 2021 by ilinastoilkovska -\* Created Mon Oct 17 13:02:01 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla b/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla deleted file mode 100644 index dcd99c6e28..0000000000 --- a/docs/spec/tla/fungible-token-transfer/MC_IBCTokenTransfer.tla +++ /dev/null @@ -1,31 +0,0 @@ -------------------------- MODULE MC_IBCTokenTransfer ------------------------ - -MaxHeight == 5 -MaxPacketSeq == 5 -MaxBalance == 5 -NativeDenominationChainA == "atom" -NativeDenominationChainB == "eth" - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: ACCOUNT -> Int; - accounts, \* a map from chainIDs and denominations to account balances - \* @type: ACCOUNT -> Int; - escrowAccounts \* a map from channelIDs and denominations to escrow account balances - -INSTANCE IBCTokenTransfer - -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/fungible-token-transfer/README.md b/docs/spec/tla/fungible-token-transfer/README.md deleted file mode 100644 index 5d7a7199d4..0000000000 --- 
a/docs/spec/tla/fungible-token-transfer/README.md +++ /dev/null @@ -1,218 +0,0 @@ -# TLA+ specification of the IBC Fungible Token Transfer Protocol - -This document describes the TLA+ model of the core logic of the English -specification [ICS -20](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer). We -start by discussing [the model of the -protocol](#the-model-of-the-protocol). - Then this document provides links to our TLA+ formalization of [Properties and -invariants](#properties-and-invariants) that formalizes what a fungible -token protocol is supposed to achieve. -After that we discuss how to [use the model](#using-the-model). - -## The Model of the Protocol - - Mirroring -the structure of the English specification, we start by discussing -initialization ([Port and Channel Setup & Channel lifecycle management](#port-and-channel-setup-and-channel-lifecycle-management)), and then provide the links to the TLA+ modules that -implement [packet relay](#packet-relay), that is, the core callback functions. - -As the application "fungible token transfer" uses the underlying IBC -infrastructure, we also modeled it to the extent necessary in [helper -modules](#helper-modules). - -### Port and Channel Setup and Channel lifecycle management - - -In the model we assume that the [`setup()`](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#port--channel-setup) function has been called -before. The [channel handshake -functions](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#channel-lifecycle-management) -are also considered being already executed. Our -model starts from a state where the channel handshake has completed -successfully. 
- -### Packet Relay - -The [core callback functions](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#packet-relay) -`createOutgoingPacket()`, `onRecvPacket()`, `onRecvPacket()` and - `onTimeoutPacket()`, as well as the auxiliary function `refundTokens()` - are modeled in - [ICS20FungibleTokenTransferHandlers.tla](ICS20FungibleTokenTransferHandlers.tla). - -### Helper modules - -In order to completely specify the behavior of fungible token -transfer, we encoded the required additional functionalities of IBC in -the TLA+ modules discussed below. From -the viewpoint of TLA+, [IBCTokenTransfer.tla](IBCTokenTransfer.tla) is -the main module that brings together all other modules that are -discussed here. We will discuss it the last. - - -#### [ICS04PacketHandlers.tla](ICS04PacketHandlers.tla) - -This module captures the functions -specifying packet flow and handling from [ICS -04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics). - -#### [Bank.tla](Bank.tla) -The bank module encodes functions defined by the Cosmos bank - application. - -#### [Chain.tla](Chain.tla) - -This module captures the relevant - Cosmos SDK functionality, that is, the context in which token - transfer runs. In the complete TLA+ model it is instantiated twice, - once for each chain participating in the token transfer. 
- The transition relation is defined by - -```tla -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket -``` - -- `AdvanceChain`: increments the height of the chain -- `HandlePacketDatagrams`: based on the datagram type of the next - incoming datagram (created in - [IBCTokenTransfer.tla](IBCTokenTransfer.tla); see below), it calls the - appropriate datagram handlers from ICS 04 - ([ICS04PacketHandlers.tla](ICS04PacketHandlers.tla)), which in turn call the - ICS 20 module callbacks specified in - [ICS20FungibleTokenTransferHandlers.tla](ICS20FungibleTokenTransferHandlers.tla). - This result in an update of the application state (bank accounts, - packet log, provable and private store). -- `SendPacket`: models that a user wants to initiate a transfer -- `AcknowledgePacket`: writes an acknowledgement for a received packet - on the packet log. - - -#### [IBCTokenTransfer.tla](IBCTokenTransfer.tla) -This is the main module that - brings everything together. It specifies a transitions system - consisting of two chains ([Chain.tla](Chain.tla)) and a - relayer node (modelled here). -```tla -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars -``` - -- `ChainAction` performs an action of one non-deterministically chosen - chain. - -- `EnvironmentAction` performs the relayer logic, that is, reads the - packet log and creates an appropriate datagram for the destination - chain (`CreateDatagrams`). - - -### Properties and invariants - -The English specification provides informal requirements as "[desired properties]( -https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#desired-properties)". 
- -#### Preservation of fungibility - -We understand that for establishing "Preservation of fungibility" it -is sufficient to establish that if -some tokens have been transferred from chain A to chain B, and the receiver -on chain B wants to return them, then the tokens can be returned. - -For this we require the assumption (which is somewhat implicit it - its [correctness -argument](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-020-fungible-token-transfer#correctness)) that the source chain only performs valid transitions. - -This is implemented in the property `ICS20Prop` in the file [IBCTokenTransfer.tla](IBCTokenTransfer.tla). - - -#### Preservation of total supply - -We understand "Preservation of total supply" as conjunction of two -properties - -- For each native denomination of a chain: the sum of the amounts in - user accounts in this denomination and the amounts in escrow - accounts in this denomination is constant. - -The following intuitive property can only be specified and guaranteed -if all involved chains only perform valid transitions: - -- The amount in denomination *d* in escrow accounts in the chain in which *d* is native -is equal to the sum of: - * the amounts in-flight packets in a (prefixed or unprefixed) denomination ending with *d* - * the amounts in accounts in a prefixed denomination ending with *d*, in which *d* is -**not** native - -These two properties are implemented in the invariant `ICS20Inv` in the file -[IBCTokenTransfer.tla](IBCTokenTransfer.tla). - -#### No Whitelist - -This is a design requirement, and not a correctness property that can be expressed -in temporal logic. - - -#### Symmetric - -This is not a temporal property but a property on the local transition -relation. It is satisfied by construction (of both the code and the -model). - - -#### No Byzantine Inflation - -This should be implied by the first property of preservation of total -supply. 
This is under the assumption that the property found in ICS 20 -"Fault containment: prevents Byzantine-inflation of tokens originating -on chain A, as a result of chain B’s Byzantine behavior (though any -users who sent tokens to chain B may be at risk)." is purely -understood in terms on inflation **on chain A**. - -We note that chain B can send an unbounded amount of tokens that it -claims to originate from A to some chain C. - - -## Using the Model - - -### Constants - -The module `IBCTokenTransfer.tla` is parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number, - - `MaxBalance`, a natural number denoting the maximal bank account balance, - - `NativeDenominationChainA`, a string denoting the native denomination of `ChainA`, - - `NativeDenominationChainB`, a string denoting the native denomination of `ChainB` - - We assume that the native denominations of the chains are different. - - -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `IBCTokenTransfer.tla` - - create a model - - assign a value to the constants (example values can be found in `IBCTokenTransfer.cfg`) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC using the constants defined in `IBCTokenTransfer.cfg` and verified the invariants `TypeOK` and `ICS20Inv` in 1min21s and the property `ICS20Prop` in 9min34s. -We note that the specification currently models two transfers: one from `ChainA` to `ChainB`, and vice versa, in their respective native denominations. -Both chains are correct, and there is no malicious relayer. 
-The relayer implements the logic from [ICS 18](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms), in particular, it does not -relay timeouts. -However, the packet timeout handlers are specified in [`ICS04PacketHandlers.tla`](ICS04PacketHandlers.tla) -for future use. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. diff --git a/docs/spec/tla/ibc-core/Chain.tla b/docs/spec/tla/ibc-core/Chain.tla deleted file mode 100644 index 500c985dec..0000000000 --- a/docs/spec/tla/ibc-core/Chain.tla +++ /dev/null @@ -1,290 +0,0 @@ ----------------------------- MODULE Chain ---------------------------- - -(*************************************************************************** - This module models the behavior of a chain running the IBC Core Protocols. - - The chain state is represented by a chain store, which is a snapshot of the - provable and private stores, to the extent necessary for IBC. Additionally, - a chain has dedicated datagram containers for: - 1. client, connection, and channel datagrams (given by a set of datagrams), - 2. packet datagrams (given by a queue of datagrams that models the order - in which the datagrams were submitted by the relayer). 
- -***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions, - ICS02ClientHandlers, ICS03ConnectionHandlers, - ICS04ChannelHandlers, ICS04PacketHandlers - -CONSTANTS - MaxHeight, \* maximal chain height - ChainID, \* chain identifier - ChannelOrdering, \* indicate whether the channels are ordered or unordered - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - MaxPacketSeq \* maximal packet sequence number - -VARIABLES - chainStore, \* chain store, containing client heights, a connection end, a channel end - incomingDatagrams, \* set of incoming datagrams - incomingPacketDatagrams, \* sequence of incoming packet datagrams - history, \* history variable - packetLog, \* packet log - appPacketSeq \* packet sequence number from the application on the chain - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system -Versions == 1..MaxVersion \* set of possible connection versions - -(*************************************************************************** - Client update operators - ***************************************************************************) -\* Update the clients on chain with chainID, -\* using the client datagrams generated by the relayer -\* (Handler operators defined in ICS02ClientHandlers.tla) -LightClientUpdate(chainID, store, datagrams) == - \* create clients - LET clientCreatedStore == HandleCreateClient(chainID, store, datagrams) IN - \* update clients - LET clientUpdatedStore == HandleClientUpdate(chainID, clientCreatedStore, datagrams) IN - - clientUpdatedStore - -(*************************************************************************** - Connection update operators - ***************************************************************************) -\* Update the connections on chain with chainID, -\* using the connection datagrams generated by the relayer -\* (Handler operators defined in 
ICS03ConnectionHandlers.tla) -ConnectionUpdate(chainID, store, datagrams) == - \* update the chain store with "ConnOpenInit" datagrams - LET connOpenInitStore == HandleConnOpenInit(chainID, store, datagrams) IN - - \* update the chain store with "ConnOpenTry" datagrams - LET connOpenTryStore == HandleConnOpenTry(chainID, connOpenInitStore, datagrams) IN - - \* update the chain store with "ConnOpenAck" datagrams - LET connOpenAckStore == HandleConnOpenAck(chainID, connOpenTryStore, datagrams) IN - - \* update the chain store with "ConnOpenConfirm" datagrams - LET connOpenConfirmStore == HandleConnOpenConfirm(chainID, connOpenAckStore, datagrams) IN - - \* output the updated chain store - connOpenConfirmStore - -(*************************************************************************** - Channel update operators - ***************************************************************************) -\* Update the channel on chain with chainID, -\* using the channel datagrams generated by the relayer -\* (Handler operators defined in ICS04ChannelHandlers.tla) -ChannelUpdate(chainID, store, datagrams) == - \* update the chain store with "ChanOpenInit" datagrams - LET chanOpenInitStore == HandleChanOpenInit(chainID, store, datagrams) IN - - \* update the chain store with "ChanOpenTry" datagrams - LET chanOpenTryStore == HandleChanOpenTry(chainID, chanOpenInitStore, datagrams) IN - - \* update the chain store with "ChanOpenAck" datagrams - LET chanOpenAckStore == HandleChanOpenAck(chainID, chanOpenTryStore, datagrams) IN - - \* update the chain store with "ChanOpenConfirm" datagrams - LET chanOpenConfirmStore == HandleChanOpenConfirm(chainID, chanOpenAckStore, datagrams) IN - - \* update the chain store with "ChanCloseInit" datagrams - LET chanCloseInitStore == HandleChanCloseInit(chainID, chanOpenConfirmStore, datagrams) IN - - \* update the chain store with "ChanCloseConfirm" datagrams - LET chanCloseConfirmStore == HandleChanCloseConfirm(chainID, chanCloseInitStore, 
datagrams) IN - - chanCloseConfirmStore - -(*************************************************************************** - Packet update operators - ***************************************************************************) -\* Update the chain store of the chain with chainID and the packet log, -\* using the packet datagrams generated by the relayer -\* (Handler operators defined in ICS04PacketHandlers.tla) -\* @type: (Str, CHAINSTORE, Seq(DATAGRAM), Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -PacketUpdate(chainID, store, packetDatagrams, log) == - \* if the sequence of packet datagrams is not empty - IF packetDatagrams /= <<>> - THEN \* process the packet datagram at the head of the sequence - LET packetDatagram == Head(packetDatagrams) IN - LET packet == packetDatagram.packet IN - \* get the new updated store and packet log entry - LET newStoreAndLog == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, store, packetDatagram, log) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainID, store, packetDatagram, log) - ELSE [chainStore|-> store, packetLogEntry |-> log] IN - newStoreAndLog - ELSE [chainStore |-> store, packetLog |->log] - -(*************************************************************************** - Chain update operators - ***************************************************************************) -\* Update chainID with the received datagrams -\* Supports ICS02 (Clients), ICS03 (Connections), and ICS04 (Channels & Packets). 
-UpdateChainStoreAndPacketLog(chainID, chain, datagrams, packetDatagrams, log) == - - \* ICS02: Client updates - LET clientUpdatedStore == LightClientUpdate(chainID, chain, datagrams) IN - - \* ICS03: Connection updates - LET connectionUpdatedStore == ConnectionUpdate(chainID, clientUpdatedStore, datagrams) IN - - \* ICS04: Channel updates - LET channelUpdatedStore == ChannelUpdate(chainID, connectionUpdatedStore, datagrams) IN - - \* ICS04: Packet transmission - LET packetUpdatedStoreAndLog == PacketUpdate(chainID, channelUpdatedStore, packetDatagrams, log) IN - LET packetUpdatedStore == packetUpdatedStoreAndLog.chainStore IN - - \* update height - LET updatedChainStore == - IF /\ chainStore /= packetUpdatedStore - /\ chainStore.height + 1 \in Heights - THEN [packetUpdatedStore EXCEPT !.height = chainStore.height + 1] - ELSE packetUpdatedStore - IN - - [chainStore |-> updatedChainStore, - packetLog |-> packetUpdatedStoreAndLog.packetLog] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT !.height = chainStore.height + 1] - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* Create a packet: Abstract away from packet data, ports, and timestamp. 
- \* Assume timeoutHeight is MaxHeight - /\ LET packet == [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - srcPortID |-> chainStore.connectionEnd.channelEnd.portID, - srcChannelID |-> chainStore.connectionEnd.channelEnd.channelID, - dstPortID |-> chainStore.connectionEnd.channelEnd.counterpartyPortID, - dstChannelID |-> chainStore.connectionEnd.channelEnd.counterpartyChannelID - ] IN - LET updatedChainStore == WritePacketCommitment(chainStore, packet) IN - \* if writing the packet commitment was successful - /\ chainStore /= updatedChainStore - \* update chain store with packet committment - /\ chainStore' = updatedChainStore - \* log sent packet - /\ packetLog' = Append(packetLog, [ - type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ]) - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED <> - -\* write a packet acknowledgement on the packet log and chain store -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - /\ UNCHANGED appPacketSeq - -\* Handle the datagrams and update the chain state -HandleIncomingDatagrams == - /\ \/ incomingDatagrams /= {} - \/ incomingPacketDatagrams /= <<>> - /\ LET updatedChainStoreAndPacketLog == - UpdateChainStoreAndPacketLog(ChainID, chainStore, incomingDatagrams, incomingPacketDatagrams, packetLog) IN - /\ chainStore' = updatedChainStoreAndPacketLog.chainStore - /\ packetLog' = updatedChainStoreAndPacketLog.packetLog - /\ incomingDatagrams' = {} - /\ incomingPacketDatagrams' = IF incomingPacketDatagrams /= <<>> - THEN Tail(incomingPacketDatagrams) - ELSE incomingPacketDatagrams - /\ history' = CASE chainStore'.connectionEnd.state = "INIT" - -> [history EXCEPT !.connInit = 
TRUE] - [] chainStore'.connectionEnd.state = "TRYOPEN" - -> [history EXCEPT !.connTryOpen = TRUE] - [] chainStore'.connectionEnd.state = "OPEN" - -> [history EXCEPT !.connOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "INIT" - -> [history EXCEPT !.chanInit = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "TRYOPEN" - -> [history EXCEPT !.chanTryOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "OPEN" - -> [history EXCEPT !.chanOpen = TRUE] - [] chainStore'.connectionEnd.channelEnd.state = "CLOSED" - -> [history EXCEPT !.chanClosed = TRUE] - [] OTHER - -> history - /\ UNCHANGED appPacketSeq - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - each chain is initialized to some element of the set -\* InitChainStores (defined in IBCCoreDefinitions.tla) -\* - pendingDatagrams for each chain is empty -\* - the packetSeq is set to 1 -Init == - /\ chainStore \in InitChainStore(Versions, ChannelOrdering) - /\ incomingDatagrams = {} - /\ incomingPacketDatagrams = <<>> - /\ history = InitHistory - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet if the appPacketSeq is not bigger than MaxPacketSeq -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandleIncomingDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -Fairness == - /\ WF_vars(Next) - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -\* ChainStores, Datagrams, PacketLogEntries are defined in IBCCoreDefinitions.tla -TypeOK == - /\ chainStore \in ChainStores(Heights, ChannelOrdering, MaxPacketSeq, Versions) - /\ incomingDatagrams \in SUBSET 
Datagrams(Heights, MaxPacketSeq, Versions) - /\ incomingPacketDatagrams \in Seq(Datagrams(Heights, MaxPacketSeq, Versions)) - /\ history \in Histories - /\ appPacketSeq \in 1..(MaxPacketSeq + 1) - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq)) - -(*************************************************************************** - Properties - ***************************************************************************) -\* it ALWAYS holds that the height of the chain does not EVENTUALLY decrease -HeightDoesntDecrease == - [](\A h \in Heights : chainStore.height = h - => <>(chainStore.height >= h)) - -============================================================================= -\* Modification History -\* Last modified Fri Feb 05 13:46:33 CET 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/IBCCore.cfg b/docs/spec/tla/ibc-core/IBCCore.cfg deleted file mode 100644 index 180958f961..0000000000 --- a/docs/spec/tla/ibc-core/IBCCore.cfg +++ /dev/null @@ -1,24 +0,0 @@ -CONSTANTS - MaxHeight = 2 - MaxVersion = 1 - MaxPacketSeq = 1 - ClientDatagramsRelayer1 = TRUE - ClientDatagramsRelayer2 = FALSE - ConnectionDatagramsRelayer1 = TRUE - ConnectionDatagramsRelayer2 = FALSE - ChannelDatagramsRelayer1 = TRUE - ChannelDatagramsRelayer2 = FALSE - PacketDatagramsRelayer1 = TRUE - PacketDatagramsRelayer2 = FALSE - ChannelOrdering = "UNORDERED" - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - IBCInv - -\* PROPERTIES -\* IBCSafety -\* IBCDelivery \ No newline at end of file diff --git a/docs/spec/tla/ibc-core/IBCCore.tla b/docs/spec/tla/ibc-core/IBCCore.tla deleted file mode 100644 index 2c3447bc60..0000000000 --- a/docs/spec/tla/ibc-core/IBCCore.tla +++ /dev/null @@ -1,610 +0,0 @@ ------------------------------- MODULE IBCCore ------------------------------ - -(*************************************************************************** - A TLA+ specification of the IBC Core protocols (ICS02, ICS03, 
ICS04, ICS18). - This module is the main module in the specification and models a - system consisting of two chains and two relayers. - - The model allows to express concurrency aspects of a system with multiple - (correct) relayers. The specification is written in a modular way, in order - to facilitate future formal verification of properties and invariants in - an adversarial setting. - - The specification also contains type annotations for the model checker - Apalache. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Int; - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Bool; - ClientDatagramsRelayer1, \* toggle generation of client datagrams for Relayer1 - \* @type: Bool; - ClientDatagramsRelayer2, \* toggle generation of client datagrams for Relayer2 - \* @type: Bool; - ConnectionDatagramsRelayer1, \* toggle generation of connection datagrams for Relayer1 - \* @type: Bool; - ConnectionDatagramsRelayer2, \* toggle generation of connection datagrams for Relayer2 - \* @type: Bool; - ChannelDatagramsRelayer1, \* toggle generation of channel datagrams for Relayer1 - \* @type: Bool; - ChannelDatagramsRelayer2, \* toggle generation of channel datagrams for Relayer2 - \* @type: Bool; - PacketDatagramsRelayer1, \* toggle generation of packet datagrams for Relayer1 - \* @type: Bool; - PacketDatagramsRelayer2, \* toggle generation of packet datagrams for Relayer2 - \* @type: Str; - ChannelOrdering \* indicate whether the channels are ordered or unordered - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* chain store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* chain store of ChainB - \* @type: Set(DATAGRAM); - incomingDatagramsChainA, \* set of 
(client, connection, channel) datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - incomingDatagramsChainB, \* set of (client, connection, channel) datagrams incoming to ChainB - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Int; - relayer1Heights, \* the client heights of Relayer1 - \* @type: Str -> Int; - relayer2Heights, \* the client heights of Relayer2 - \* @type: Str -> Set(DATAGRAM); - outgoingDatagrams, \* sets of (client, connection, channel) datagrams outgoing of the relayers - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* sequences of packet datagrams outgoing of the relayers - \* @type: Bool; - closeChannelA, \* flag that triggers closing of the channel end at ChainA - \* @type: Bool; - closeChannelB, \* flag that triggers closing of the channel end at ChainB - \* @type: HISTORY; - historyChainA, \* history variables for ChainA - \* @type: HISTORY; - historyChainB, \* history variables for ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB \* packet sequence number from the application on ChainB - -vars == <> - -chainAvars == <> -chainBvars == <> -relayerVars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - - -(*************************************************************************** - Instances of Relayer and Chain - ***************************************************************************) - -\* We suppose there are two correct relayers in the system, Relayer1 and Relayer2 -\* Relayer1 -- Instance of ICS18Relayer.tla -Relayer1 == INSTANCE ICS18Relayer - WITH GenerateClientDatagrams <- ClientDatagramsRelayer1, - GenerateConnectionDatagrams <- 
ConnectionDatagramsRelayer1, - GenerateChannelDatagrams <- ChannelDatagramsRelayer1, - GeneratePacketDatagrams <- PacketDatagramsRelayer1, - relayerHeights <- relayer1Heights - -\* Relayer2 -- Instance of ICS18Relayer.tla -Relayer2 == INSTANCE ICS18Relayer - WITH GenerateClientDatagrams <- ClientDatagramsRelayer2, - GenerateConnectionDatagrams <- ConnectionDatagramsRelayer2, - GenerateChannelDatagrams <- ChannelDatagramsRelayer2, - GeneratePacketDatagrams <- PacketDatagramsRelayer2, - relayerHeights <- relayer2Heights - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - chainStore <- chainAstore, - incomingDatagrams <- incomingDatagramsChainA, - incomingPacketDatagrams <- incomingPacketDatagramsChainA, - history <- historyChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - chainStore <- chainBstore, - incomingDatagrams <- incomingDatagramsChainB, - incomingPacketDatagrams <- incomingPacketDatagramsChainB, - history <- historyChainB, - appPacketSeq <- appPacketSeqChainB - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* RelayerAction: either correct relayer takes a step, leaving the other -\* variables unchanged -RelayerAction == - \/ /\ Relayer1!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED chainBvars - /\ UNCHANGED relayer2Heights - /\ UNCHANGED <> - \/ /\ Relayer2!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED chainBvars - /\ UNCHANGED relayer1Heights - /\ UNCHANGED <> - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchanged -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - /\ UNCHANGED relayerVars - /\ UNCHANGED <> - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED relayerVars - /\ 
UNCHANGED <> - -(*************************************************************************** - IBCCore Environment actions - ***************************************************************************) -\* Submit datagrams from relayers to chains -SubmitDatagrams == - /\ incomingDatagramsChainA' = incomingDatagramsChainA \union outgoingDatagrams["chainA"] - /\ incomingDatagramsChainB' = incomingDatagramsChainB \union outgoingDatagrams["chainB"] - /\ outgoingDatagrams' = [chainID \in ChainIDs |-> {}] - /\ incomingPacketDatagramsChainA' = incomingPacketDatagramsChainA \o outgoingPacketDatagrams["chainA"] - /\ incomingPacketDatagramsChainB' = incomingPacketDatagramsChainB \o outgoingPacketDatagrams["chainB"] - /\ outgoingPacketDatagrams' = [chainID \in ChainIDs |-> <<>>] - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* Non-deterministically set channel closing flags -CloseChannels == - \/ /\ closeChannelA = FALSE - /\ closeChannelA' \in BOOLEAN - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED closeChannelB - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - \/ /\ closeChannelB = FALSE - /\ closeChannelB' \in BOOLEAN - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED closeChannelA - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -EnvironmentAction == - \/ SubmitDatagrams - \/ CloseChannels - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -Init == - /\ ChainA!Init - /\ ChainB!Init - /\ Relayer1!Init - /\ Relayer2!Init - /\ closeChannelA = FALSE - /\ closeChannelB = FALSE - /\ packetLog = <<>> - -\* Next state action -Next == - \/ ChainAction - \/ RelayerAction - \/ EnvironmentAction - \/ UNCHANGED vars - -\* Fairness constraint -Fairness == - /\ WF_vars(SubmitDatagrams) - /\ ChainA!Fairness - /\ ChainB!Fairness - /\ Relayer1!Fairness - /\ Relayer2!Fairness - /\ 
<>[]closeChannelA - /\ <>[]closeChannelB - -\* Specification formula -Spec == Init /\ [][Next]_vars /\ Fairness - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* Type invariant -TypeOK == - /\ ChainA!TypeOK - /\ ChainB!TypeOK - /\ Relayer1!TypeOK - /\ Relayer2!TypeOK - /\ closeChannelA \in BOOLEAN - /\ closeChannelB \in BOOLEAN - -(*************************************************************************** - Helper operators used in properties - ***************************************************************************) -\* get chain store by ID -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* returns true if there is a "ClientUpdate" datagram -\* in the incoming datagrams for chainID -IsClientUpdateInIncomingDatagrams(chainID, h) == - LET clID == GetCounterpartyClientID(chainID) IN - IF chainID = "chainA" - THEN [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in incomingDatagramsChainA - ELSE [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in incomingDatagramsChainB - -\* returns true if there is a "ClientUpdate" datagram -\* in the outgoing datagrams for chainID -IsClientUpdateInOutgoingDatagrams(chainID, h) == - LET clID == GetCounterpartyClientID(chainID) IN - [type |-> "ClientUpdate", clientID |-> clID, height |-> h] - \in outgoingDatagrams[chainID] - -\* returns true if there is a "ConnOpenInit" datagram -\* in outgoing datagrams for chainID -IsConnOpenInitInOutgoingDatagrams(chainID) == - LET clID == GetClientID(chainID) IN - LET counterpartyClID == GetCounterpartyClientID(chainID) IN - LET connID == GetConnectionID(chainID) IN - LET counterpartyConnID == GetCounterpartyConnectionID(chainID) IN - - [type |-> "ConnOpenInit", - connectionID |-> connID, - clientID |-> clID, - counterpartyConnectionID |-> counterpartyConnID, - 
counterpartyClientID |-> counterpartyClID] \in outgoingDatagrams[chainID] - -\* returns true if there is a "ChanOpenInit" datagram -\* in outgoing datagrams for chainID -IsChanOpenInitInOutgoingDatagrams(chainID) == - LET chanID == GetChannelID(chainID) IN - LET counterpartyChanID == GetCounterpartyChannelID(chainID) IN - [type |-> "ChanOpenInit", - channelID |-> chanID, - counterpartyChannelID |-> counterpartyChanID] \in outgoingDatagrams[chainID] - -\* returns true if there is a "ChanCloseInit" datagram -\* in outgoing datagrams for chainID -IsChanCloseInitInOutgoingDatagrams(chainID) == - LET chanID == GetChannelID(chainID) IN - [type |-> "ChanCloseInit", - channelID |-> chanID] \in outgoingDatagrams[chainID] - - ----------------------------------------------------------------------------- -(*************************************************************************** - Invariants & Properties - ***************************************************************************) -(*************************************************************************** - Invariants: connection datagrams - ***************************************************************************) -\* once connInit is set to TRUE in the history variable, -\* the connection never goes to UNINIT -ConnectionInitInv == - /\ historyChainA.connInit => ~IsConnectionUninit(chainAstore) - /\ historyChainB.connInit => ~IsConnectionUninit(GetChainByID("chainB")) - -\* once connTryOpen is set to TRUE in the history variable, -\* the connection never goes to UNINIT -ConnectionTryOpenInv == - /\ historyChainA.connTryOpen => ~IsConnectionUninit(chainAstore) - /\ historyChainB.connTryOpen => ~IsConnectionUninit(GetChainByID("chainB")) - -\* once connOpen is set to TRUE in the history variable, -\* the connection never goes to UNINIT, INIT, or TRYOPEN -ConnectionOpenInv == - /\ historyChainA.connOpen => (/\ ~IsConnectionUninit(chainAstore) - /\ ~IsConnectionInit(chainAstore) - /\ ~IsConnectionTryOpen(chainAstore)) - /\ 
historyChainB.connOpen => (/\ ~IsConnectionUninit(GetChainByID("chainB")) - /\ ~IsConnectionInit(GetChainByID("chainB")) - /\ ~IsConnectionTryOpen(GetChainByID("chainB"))) - -(*************************************************************************** - Invariants: channel datagrams - ***************************************************************************) -\* once chanInit is set to TRUE in the history variable, -\* the channel never goes to UNINIT -ChannelInitInv == - /\ historyChainA.chanInit => ~IsChannelUninit(chainAstore) - /\ historyChainB.chanInit => ~IsChannelUninit(chainBstore) - -\* once chanTryOpen is set to TRUE in the history variable, -\* the channel never goes to UNINIT -ChannelTryOpenInv == - /\ historyChainA.chanTryOpen => ~IsChannelUninit(chainAstore) - /\ historyChainB.chanTryOpen => ~IsChannelUninit(chainBstore) - -\* once chanOpen is set to TRUE in the history variable, -\* the channel never goes to UNINIT, INIT, or TRYOPEN -ChannelOpenInv == - /\ historyChainA.chanOpen => (/\ ~IsChannelUninit(chainAstore) - /\ ~IsChannelInit(chainAstore) - /\ ~IsChannelTryOpen(chainAstore)) - /\ historyChainB.chanOpen => (/\ ~IsChannelUninit(chainBstore) - /\ ~IsChannelInit(chainBstore) - /\ ~IsChannelTryOpen(chainBstore)) - -\* once chanClosed is set to TRUE in the history variable, -\* the channel never goes to UNINIT, INIT, TRYOPEN, or OPEN -ChannelCloseInv == - /\ historyChainA.chanClosed => (/\ ~IsChannelUninit(chainAstore) - /\ ~IsChannelInit(chainAstore) - /\ ~IsChannelTryOpen(chainAstore) - /\ ~IsChannelOpen(chainAstore)) - /\ historyChainB.chanClosed => (/\ ~IsChannelUninit(chainBstore) - /\ ~IsChannelInit(chainBstore) - /\ ~IsChannelTryOpen(chainBstore) - /\ ~IsChannelOpen(chainBstore)) - -(*************************************************************************** - Invariant [IBCInv] - ***************************************************************************) -\* IBCInv invariant: conjunction of invariants -IBCInv == - \* at least one 
relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => /\ ConnectionInitInv - /\ ConnectionTryOpenInv - /\ ConnectionOpenInv - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChannelInitInv - /\ ChannelTryOpenInv - /\ ChannelOpenInv - /\ ChannelCloseInv - - -(*************************************************************************** - Safety: client datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID and every height h: -\* - if -\* * there is a "ClientUpdate" datagram for chainID and height h and -\* * the height h is smaller than the maximal counterparty client height -\* at chainID -\* - then -\* * the height h is NEVER added to the counterparty client heights -\* -\* Note: this property does not hold when it is allowed to install older headers -ClientUpdateSafety == - [](\A chainID \in ChainIDs : \A h \in Heights : - (/\ IsClientUpdateInIncomingDatagrams(chainID, h) - /\ h < GetMaxCounterpartyClientHeight(GetChainByID(chainID))) - => [](~IsCounterpartyClientHeightOnChain(GetChainByID(chainID), h))) - -(*************************************************************************** - Safety: connection datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in INIT -\* - then -\* * it NEVER goes to UNINIT -ConnectionInitSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionInit(GetChainByID(chainID)) - => [](~IsConnectionUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in TRYOPEN -\* - then -\* * it NEVER goes to UNINIT ] -ConnectionTryOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionTryOpen(GetChainByID(chainID)) - => [](~IsConnectionUninit(GetChainByID(chainID)))) - 
-\* it ALWAYS holds that, for every chainID -\* - if -\* * the connection end is in OPEN -\* - then -\* * it NEVER goes to UNINIT, INIT, or TRYOPEN -ConnectionOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsConnectionOpen(GetChainByID(chainID)) - => [](/\ ~IsConnectionUninit(GetChainByID(chainID)) - /\ ~IsConnectionInit(GetChainByID(chainID)) - /\ ~IsConnectionTryOpen(GetChainByID(chainID)))) - -(*************************************************************************** - Safety: channels datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in INIT -\* - then -\* * it NEVER goes to UNINIT -ChannelInitSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelInit(GetChainByID(chainID)) - => [](~IsChannelUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in TRYOPEN -\* - then -\* * it NEVER goes to UNINIT -ChannelTryOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelTryOpen(GetChainByID(chainID)) - => [](~IsChannelUninit(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in OPEN -\* - then -\* * it NEVER goes to UNINIT, INIT, or TRYOPEN -ChannelOpenSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelOpen(GetChainByID(chainID)) - => [](/\ ~IsChannelUninit(GetChainByID(chainID)) - /\ ~IsChannelInit(GetChainByID(chainID)) - /\ ~IsChannelTryOpen(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * the channel end is in CLOSED -\* - then -\* * it NEVER goes to UNINIT, INIT, TRYOPEN, or OPEN -ChannelCloseSafety == - [](\A chainID \in ChainIDs: - /\ IsChannelClosed(GetChainByID(chainID)) - => [](/\ ~IsChannelUninit(GetChainByID(chainID)) - /\ ~IsChannelInit(GetChainByID(chainID)) - /\ ~IsChannelTryOpen(GetChainByID(chainID)) - /\ ~IsChannelOpen(GetChainByID(chainID)))) - 
-(*************************************************************************** - Safety [IBCSafety]: - Bad datagrams are not used to update the chain stores - ***************************************************************************) -\* IBCSafety property: conjunction of safety properties -IBCSafety == - \* at least one relayer creates client datagrams - /\ (ClientDatagramsRelayer1 \/ ClientDatagramsRelayer2) - => ClientUpdateSafety - \* at least one relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => /\ ConnectionInitSafety - /\ ConnectionTryOpenSafety - /\ ConnectionOpenSafety - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChannelInitSafety - /\ ChannelTryOpenSafety - /\ ChannelOpenSafety - /\ ChannelCloseSafety - -(*************************************************************************** - Liveness: Eventual delivery of client datagrams - ***************************************************************************) - -\* it ALWAYS holds that, for every chainID: -\* - if -\* * the counterparty client is not initialized -\* - then -\* * the chain EVENTUALLY creates the counterparty client -CreateClientDelivery == - [](\A chainID \in ChainIDs : - (GetCounterpartyClientHeights(GetChainByID(chainID)) = {}) - => <>(IsCounterpartyClientOnChain(GetChainByID(chainID)))) - -\* it ALWAYS holds that, for every chainID and every height h -\* - if -\* * EVENTUALLY a ClientUpdate for height h is sent to chainID -\* - then -\* * EVENTUALLY height h is added to counterparty client heights of chainID -ClientUpdateDelivery == - [](\A chainID \in ChainIDs : \A h \in Heights : - (<>IsClientUpdateInOutgoingDatagrams(chainID, h) - => <>(IsCounterpartyClientHeightOnChain(GetChainByID(chainID), h)))) - -(*************************************************************************** - Liveness: Eventual delivery of connection datagrams - 
***************************************************************************) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ConnOpenInit is sent to chainID -\* - then -\* * EVENTUALLY the connections at chainID and its counterparty are open -ConnOpenInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsConnOpenInitInOutgoingDatagrams(chainID) - => <>(/\ IsConnectionOpen(GetChainByID(chainID)) - /\ IsConnectionOpen(GetChainByID(GetCounterpartyChainID(chainID)))))) - -(*************************************************************************** - Liveness: Eventual delivery of channel datagrams - ***************************************************************************) -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ChanOpenInit is sent to chainID -\* - then -\* * EVENTUALLY the channels at chainID and its counterparty are open -ChanOpenInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsChanOpenInitInOutgoingDatagrams(chainID) - => <>(/\ IsChannelOpen(GetChainByID(chainID)) - /\ IsChannelOpen(GetChainByID(GetCounterpartyChainID(chainID)))))) - -\* it ALWAYS holds that, for every chainID -\* - if -\* * EVENTUALLY a ChanCloseInit is sent to chainID -\* - then -\* * EVENTUALLY the channels at chainID and its counterparty are closed -ChanCloseInitDelivery == - [](\A chainID \in ChainIDs : - (<>IsChanCloseInitInOutgoingDatagrams(chainID) - => <>(/\ IsChannelClosed(GetChainByID(chainID)) - /\ IsChannelClosed(GetChainByID(GetCounterpartyChainID(chainID)))))) - -(*************************************************************************** - Liveness [IBCDelivery]: - If ChainA sends a datagram to ChainB, then ChainB eventually receives - the datagram - - * ChainA sends a datagram iff a correct relayer constructs the datagram by - scanning ChainA's store - * ChainB receives a datagram iff it acts upon this datagram - ***************************************************************************) -\* IBCDelivery property: 
conjunction of delivery properties -IBCDelivery == - \* at least one relayer creates client datagrams - /\ (ClientDatagramsRelayer1 \/ ClientDatagramsRelayer2) - => /\ CreateClientDelivery - /\ ClientUpdateDelivery - \* at least one relayer creates connection datagrams - /\ (ConnectionDatagramsRelayer1 \/ ConnectionDatagramsRelayer2) - => ConnOpenInitDelivery - \* at least one relayer creates channel datagrams - /\ (ChannelDatagramsRelayer1 \/ ChannelDatagramsRelayer2) - => /\ ChanOpenInitDelivery - /\ ChanCloseInitDelivery - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:05:32 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:48:22 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla b/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla deleted file mode 100644 index 758a046817..0000000000 --- a/docs/spec/tla/ibc-core/IBCCoreDefinitions.tla +++ /dev/null @@ -1,703 +0,0 @@ ------------------------- MODULE IBCCoreDefinitions ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are shared between the - different modules. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - nextSendSeq: Int, - nextRcvSeq: Int, - nextAckSeq: Int - ]; -*) -(* @typeAlias: CONN = - [ - state: Str, - connectionID: Str, - clientID: Str, - counterpartyConnectionID: Str, - counterpartyClientID: Str, - channelEnd: CHAN, - versions: Set(Int) - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - counterpartyClientHeights: Set(Int), - connectionEnd: CONN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKET), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK) - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - height: Int, - proofHeight: Int, - consensusHeight: Int, - clientID: Str, - counterpartyClientID: Str, - connectionID: Str, - counterpartyConnectionID: Str, - versions: Set(Int), - portID: Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - packet: PACKET, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: HISTORY = - [ - connInit: Bool, - connTryOpen: Bool, - connOpen: Bool, - chanInit: Bool, - chanTryOpen: Bool, - chanOpen: Bool, - 
chanClosed: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ClientIDs == {"clA", "clB"} -ConnectionIDs == {"connAtoB", "connBtoA"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} - -nullHeight == 0 -nullClientID == "none" -nullConnectionID == "none" -nullChannelID == "none" -nullPortID == "none" - -ConnectionStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN"} -ChannelStates == {"UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED"} -ChannelOrder == {"ORDERED", "UNORDERED"} - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x -Min(S) == CHOOSE x \in S: \A y \in S: y >= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN", "CLOSED". - - - order -- a string - Stores whether the channel end is ordered or unordered. It has one - of the following values: "UNORDERED", "ORDERED". - - * ordered channels have three additional packet sequence fields: - nextSendSeq -- stores the sequence number of the next packet that - is going to be sent, - nextRcvSeq -- stores the sequence number of the next packet that - is going to be received, - nextAckSeq -- stores the sequence number of the next packet that - is going to be acknowledged. - - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. - - Note: we omit channel versions and connection hops. 
- ***************************************************************************) -ChannelEnds(channelOrdering, maxPacketSeq) == - IF channelOrdering = "UNORDERED" - THEN \* set of unordered channels - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - ELSE \* set of ordered channels - [ - state : ChannelStates, - order : {"ORDERED"}, - nextSendSeq : 0..maxPacketSeq, - nextRcvSeq : 0..maxPacketSeq, - nextAckSeq : 0..maxPacketSeq, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -PacketCommitments(Heights, maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] - -\* Set of packet receipts -PacketReceipts(maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -PacketAcknowledgements(maxPacketSeq) == - [ - portID : PortIDs, - channelID : ChannelIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(***************************** ConnectionEnds ***************************** - A set of connection end records. - A connection end record contains the following fields: - - - state -- a string - Stores the current state of this connection end. It has one of the - following values: "UNINIT", "INIT", "TRYOPEN", "OPEN". - - - connectionID -- a connection identifier - Stores the connection identifier of this connection end. 
- - - counterpartyConnectionID -- a connection identifier - Stores the connection identifier of the counterparty connection end. - - - clientID -- a client identifier - Stores the client identifier associated with this connection end. - - - counterpartyClientID -- a client identifier - Stores the counterparty client identifier associated with this connection end. - - - versions -- a set of versions - Stores the set of supported connection versions. At the end of a handshake, - it should be a singleton set. - - - channelEnd : a channel end record - Stores data about the channel associated with this connection end. - ***************************************************************************) -ConnectionEnds(channelOrdering, maxPacketSeq, Versions) == - [ - state : ConnectionStates, - connectionID : ConnectionIDs \union {nullConnectionID}, - counterpartyConnectionID : ConnectionIDs \union {nullConnectionID}, - clientID : ClientIDs \union {nullClientID}, - counterpartyClientID : ClientIDs \union {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : ChannelEnds(channelOrdering, maxPacketSeq) - ] - -(********************************* Packets *********************************) -\* Set of packets -Packets(Heights, maxPacketSeq) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - connectionEnd : a connection end record - Stores data about the connection with the counterparty chain. 
- - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. - - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. - - - packetsToAcknowledge : a sequence of packets - A packet is added to this sequence when a chain receives it and is used - later for the receiver chain to write an acknowledgement for the packet. - - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty. - - A chain store is the combination of the provable and private stores. - ***************************************************************************) -ChainStores(Heights, channelOrdering, maxPacketSeq, Versions) == - [ - height : Heights, - counterpartyClientHeights : SUBSET(Heights), - connectionEnd : ConnectionEnds(channelOrdering, maxPacketSeq, Versions), - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq)), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams -Datagrams(Heights, maxPacketSeq, Versions) == - [ - type : {"ClientCreate"}, - clientID : ClientIDs, - height : Heights - ] \union [ - type : {"ClientUpdate"}, - clientID : ClientIDs, - height : Heights - ] \union [ - type : {"ConnOpenInit"}, - connectionID : ConnectionIDs, - counterpartyConnectionID : ConnectionIDs, - clientID : ClientIDs, - counterpartyClientID : ClientIDs - ] \union [ - type : {"ConnOpenTry"}, - desiredConnectionID : ConnectionIDs, - counterpartyConnectionID : ConnectionIDs, - clientID : ClientIDs, - counterpartyClientID : ClientIDs, - 
versions : SUBSET (Versions), - proofHeight : Heights, - consensusHeight : Heights - ] \union [ - type : {"ConnOpenAck"}, - connectionID : ConnectionIDs, - versions : SUBSET (Versions), - proofHeight : Heights, - consensusHeight : Heights - ] \union [ - type : {"ConnOpenConfirm"}, - connectionID : ConnectionIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenInit"}, - portID : PortIDs, - channelID : ChannelIDs, - counterpartyPortID : PortIDs, - counterpartyChannelID : ChannelIDs - ] \union [ - type : {"ChanOpenTry"}, - portID : PortIDs, - channelID : ChannelIDs, - counterpartyPortID : PortIDs, - counterpartyChannelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenAck"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanOpenConfirm"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"ChanCloseInit"}, - portID : PortIDs, - channelID : ChannelIDs - ] \union [ - type : {"ChanCloseConfirm"}, - portID : PortIDs, - channelID : ChannelIDs, - proofHeight : Heights - ] \union [ - type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq), - proofHeight : Heights - ] \union [ - type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq), - acknowledgement : BOOLEAN, - proofHeight : Heights - ] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -PacketLogEntries(Heights, maxPacketSeq) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights, - 
acknowledgement : BOOLEAN - ] - -\* Null packet log entry -NullPacketLogEntry == - [type |-> "null"] - -(******************************* Histories ********************************) -\* Set of history variable records -Histories == - [ - connInit : BOOLEAN, - connTryOpen : BOOLEAN, - connOpen : BOOLEAN, - chanInit : BOOLEAN, - chanTryOpen : BOOLEAN, - chanOpen : BOOLEAN, - chanClosed : BOOLEAN - ] - -(*************************************************************************** - Initial values of a channel end, connection end, chain - ***************************************************************************) -\* Initial value of an unordered channel end: -\* - state is "UNINIT" -\* - order is "UNORDERED" -\* - channelID, counterpartyPortID, counterpartyChannelID are uninitialized -InitUnorderedChannelEnd == - [ - state |-> "UNINIT", - order |-> "UNORDERED", - portID |-> nullPortID, - channelID |-> nullChannelID, - counterpartyPortID |-> nullPortID, - counterpartyChannelID |-> nullChannelID - ] - -\* Initial value of an ordered channel end: -\* - state is "UNINIT" -\* - order is "ORDERED" -\* - nextSendSeq, nextRcvSeq, nextAckSeq are set to 0 -\* - channelID, counterpartyPortID, counterpartyChannelID are uninitialized -InitOrderedChannelEnd == - [ - state |-> "UNINIT", - order |-> "ORDERED", - nextSendSeq |-> 0, - nextRcvSeq |-> 0, - nextAckSeq |-> 0, - portID |-> nullPortID, - channelID |-> nullChannelID, - counterpartyPortID |-> nullPortID, - counterpartyChannelID |-> nullChannelID - ] - -\* Initial value of a connection end: -\* - state is "UNINIT" -\* - connectionID, counterpartyConnectionID are uninitialized -\* - clientID, counterpartyClientID are uninitialized -\* - versions is an arbitrary (non-empty) subset of the set {1, .., maxVersion} -\* - channelEnd is initialized based on channelOrdering -InitConnectionEnds(Versions, channelOrdering) == - IF channelOrdering = "ORDERED" - THEN [ - state : {"UNINIT"}, - connectionID : {nullConnectionID}, - clientID : 
{nullClientID}, - counterpartyConnectionID : {nullConnectionID}, - counterpartyClientID : {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : {InitOrderedChannelEnd} - ] - ELSE [ - state : {"UNINIT"}, - connectionID : {nullConnectionID}, - clientID : {nullClientID}, - counterpartyConnectionID : {nullConnectionID}, - counterpartyClientID : {nullClientID}, - versions : (SUBSET Versions) \ {{}}, - channelEnd : {InitUnorderedChannelEnd} - ] - -\* Initial value of the chain store: -\* - height is initialized to 1 -\* - the counterparty light client is uninitialized -\* - the connection end is initialized to InitConnectionEnd -\* - the packet committments, receipts, acknowledgements, and -\* packets to acknowledge are empty -InitChainStore(Versions, channelOrdering) == - [ - height : {1}, - counterpartyClientHeights : {{}}, - connectionEnd : InitConnectionEnds(Versions, channelOrdering), - - packetCommitments : {{}}, - packetReceipts : {{}}, - packetAcknowledgements : {{}}, - packetsToAcknowledge : {<<>>} - - ] - -\* Initial value of history flags -InitHistory == - [ - connInit |-> FALSE, - connTryOpen |-> FALSE, - connOpen |-> FALSE, - chanInit |-> FALSE, - chanTryOpen |-> FALSE, - chanOpen |-> FALSE, - chanClosed |-> FALSE - ] - -(*************************************************************************** - Client helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -GetCounterpartyChainID(chainID) == - \* IF chainID = "chainA" THEN AsID("chainB") ELSE AsID("chainA") - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the client ID of the client for chainID -GetClientID(chainID) == - \* IF chainID = "chainA" THEN AsID("clA") ELSE AsID("clB") - IF chainID = "chainA" THEN "clA" ELSE "clB" - -\* get the client ID of the client for chainID's counterparty chain -GetCounterpartyClientID(chainID) == - \* IF chainID = "chainA" THEN AsID("clB") ELSE 
AsID("clA") - IF chainID = "chainA" THEN "clB" ELSE "clA" - -\* get the latest height of chainID -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF chain.counterpartyClientHeights /= {} - THEN Max(chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the set of heights of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Set(Int); -GetCounterpartyClientHeights(chain) == - chain.counterpartyClientHeights - -\* returns true if the counterparty client is initialized on chainID -\* @type: (CHAINSTORE) => Bool; -IsCounterpartyClientOnChain(chain) == - chain.counterpartyClientHeights /= {} - -\* returns true if the height h is in counterparty client heights on chainID -\* @type: (CHAINSTORE, Int) => Bool; -IsCounterpartyClientHeightOnChain(chain, h) == - h \in chain.counterpartyClientHeights - -(*************************************************************************** - Connection helper operators - ***************************************************************************) - -\* get the connection ID of the connection end at chainID -GetConnectionID(chainID) == - IF chainID = "chainA" - THEN "connAtoB" - ELSE IF chainID = "chainB" - THEN "connBtoA" - ELSE nullConnectionID - -\* get the connection ID of the connection end at chainID's counterparty chain -GetCounterpartyConnectionID(chainID) == - IF chainID = "chainA" - THEN "connBtoA" - ELSE IF chainID = "chainB" - THEN "connAtoB" - ELSE nullConnectionID - -\* get the connection end at chainID -\* @type: (CHAINSTORE) => CONN; -GetConnectionEnd(chain) == - chain.connectionEnd - -\* pick the minimal version from a set of versions -PickVersion(versions) == - IF versions /= {} - THEN LET minVersion == Min(versions) IN - {minVersion} - ELSE {} - - -\* returns true if the connection end on chainID is UNINIT -\* @type: 
(CHAINSTORE) => Bool; -IsConnectionUninit(chain) == - chain.connectionEnd.state = "UNINIT" - -\* returns true if the connection end on chainID is INIT -\* @type: (CHAINSTORE) => Bool; -IsConnectionInit(chain) == - chain.connectionEnd.state = "INIT" - -\* returns true if the connection end on chainID is TRYOPEN -\* @type: (CHAINSTORE) => Bool; -IsConnectionTryOpen(chain) == - chain.connectionEnd.state = "TRYOPEN" - -\* returns true if the connection end on chainID is OPEN -\* @type: (CHAINSTORE) => Bool; -IsConnectionOpen(chain) == - chain.connectionEnd.state = "OPEN" - -(*************************************************************************** - Channel helper operators - ***************************************************************************) - -\* get the channel ID of the channel end at the connection end of chainID -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -GetCounterpartyChannelID(chainID) == - IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE nullChannelID - -\* get the port ID at chainID -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" - THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the channel end at the connection end of chainID -\* @type: (CHAINSTORE) => CHAN; -GetChannelEnd(chain) == - chain.connectionEnd.channelEnd - -\* returns true if the channel end on chainID is UNINIT -\* @type: (CHAINSTORE) => Bool; -IsChannelUninit(chain) == - chain.connectionEnd.channelEnd.state = "UNINIT" - -\* returns true if the channel end on chainID is INIT -\* @type: (CHAINSTORE) => Bool; -IsChannelInit(chain) == - 
chain.connectionEnd.channelEnd.state = "INIT" - -\* returns true if the channel end on chainID is TRYOPEN -\* @type: (CHAINSTORE) => Bool; -IsChannelTryOpen(chain) == - chain.connectionEnd.channelEnd.state = "TRYOPEN" - -\* returns true if the channel end on chainID is OPEN -\* @type: (CHAINSTORE) => Bool; -IsChannelOpen(chain) == - chain.connectionEnd.channelEnd.state = "OPEN" - -\* returns true if the channel end on chainID is CLOSED -\* @type: (CHAINSTORE) => Bool; -IsChannelClosed(chain) == - chain.connectionEnd.channelEnd.state = "CLOSED" - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:26:47 CEST 2021 by ilinastoilkovska -\* Created Fri Jun 05 16:56:21 CET 2020 by ilinastoilkovska \ No newline at end of file diff --git a/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla b/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla deleted file mode 100644 index c9d1489748..0000000000 --- a/docs/spec/tla/ibc-core/ICS02ClientHandlers.tla +++ /dev/null @@ -1,75 +0,0 @@ ------------------------ MODULE ICS02ClientHandlers ------------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - client create and update datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Client datagram handlers - ***************************************************************************) - -\* Handle "CreateClient" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleCreateClient(chainID, chain, datagrams) == - \* get "CreateClient" datagrams with valid clientID - LET createClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientCreate" - /\ dgr.clientID = GetCounterpartyClientID(chainID)} IN - \* get heights in datagrams with correct counterparty clientID for chainID - LET createClientHeights == {dgr.height : dgr \in createClientDgrs} IN - - \* new chain record with clients created - LET clientCreateChain == [ - chain EXCEPT !.counterpartyClientHeights = - \* if the set of counterparty client heights is not empty or - \* if the set of heights from datagrams is empty - IF \/ chain.counterpartyClientHeights /= {} - \/ createClientHeights = {} - \* then discard CreateClient datagrams - THEN chain.counterpartyClientHeights - \* otherwise, create counterparty client with height Max(createClientHeights) - ELSE {Max(createClientHeights)} - ] IN - - clientCreateChain - -\* Handle "ClientUpdate" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleClientUpdate(chainID, chain, datagrams) == - \* max client height for counterparty chain - LET maxClientHeight == GetMaxCounterpartyClientHeight(chain) IN - \* get "ClientUpdate" datagrams with valid clientID - LET updateClientDgrs == {dgr \in datagrams : - /\ dgr.type = "ClientUpdate" - /\ dgr.clientID = GetCounterpartyClientID(chainID) - \* Note: the check maxClientHeight < dgr.height can be commented out in case - \* older headers can be installed for the client - /\ maxClientHeight < dgr.height - } IN - \* get heights in datagrams with correct 
counterparty clientID for chainID - LET updateClientHeights == {dgr.height : dgr \in updateClientDgrs} IN - - \* new chain record with clients updated - LET clientUpdatedChain == [ - chain EXCEPT !.counterpartyClientHeights = - \* if set of counterparty client heights is empty - IF chain.counterpartyClientHeights = {} - \* then discard ClientUpdate datagrams - THEN chain.counterpartyClientHeights - \* otherwise, if set of heights from datagrams is not empty - ELSE IF updateClientHeights /= {} - \* then update counterparty client heights with updateClientHeights - THEN chain.counterpartyClientHeights \union updateClientHeights - \* otherwise, do not update client heights - ELSE chain.counterpartyClientHeights - ] IN - - clientUpdatedChain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:23:14 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:42:47 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla b/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla deleted file mode 100644 index 72fc205aa3..0000000000 --- a/docs/spec/tla/ibc-core/ICS03ConnectionHandlers.tla +++ /dev/null @@ -1,167 +0,0 @@ ----------------------- MODULE ICS03ConnectionHandlers ---------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - connection handshake datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Connection datagram handlers - ***************************************************************************) - -\* Handle "ConnOpenInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenInit(chainID, chain, datagrams) == - \* get "ConnOpenInit" datagrams, with a valid connection ID - LET connOpenInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenInit" - /\ dgr.connectionID = GetConnectionID(chainID)} IN - - \* if there are valid "ConnOpenInit" datagrams, create a new connection end - \* and update the chain store - IF /\ connOpenInitDgrs /= {} - /\ chain.connectionEnd.state = "UNINIT" - THEN LET connOpenInitDgr == CHOOSE dgr \in connOpenInitDgrs : TRUE IN - LET connOpenInitConnectionEnd == [ - state |-> "INIT", - connectionID |-> connOpenInitDgr.connectionID, - counterpartyConnectionID |-> connOpenInitDgr.counterpartyConnectionID, - clientID |-> connOpenInitDgr.clientID, - counterpartyClientID |-> connOpenInitDgr.counterpartyClientID, - versions |-> chain.connectionEnd.versions, - channelEnd |-> chain.connectionEnd.channelEnd - ] IN - LET connOpenInitChain == [ - chain EXCEPT !.connectionEnd = connOpenInitConnectionEnd - ] IN - - connOpenInitChain - - \* otherwise, do not update the chain store - ELSE chain - - -\* Handle "ConnOpenTry" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenTry(chainID, chain, datagrams) == - \* get "ConnOpenTry" datagrams, with a valid connection ID and valid height - LET connOpenTryDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenTry" - /\ dgr.desiredConnectionID = GetConnectionID(chainID) - /\ dgr.consensusHeight <= chain.height - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - IF connOpenTryDgrs /= {} - \* if there are valid 
"ConnOpenTry" datagrams, update the connection end - THEN LET connOpenTryDgr == CHOOSE dgr \in connOpenTryDgrs : TRUE IN - LET versionIntersection == chain.connectionEnd.versions \intersect connOpenTryDgr.versions IN - - \* if the versions from the datagram overlap with the supported versions of the connnection end - IF /\ versionIntersection /= {} - \* if the connection end is uninitialized - /\ \/ chain.connectionEnd.state = "UNINIT" - \* of if it is initialized, and all fields match the datagram fields - \/ /\ chain.connectionEnd.state = "INIT" - /\ chain.connectionEnd.connectionID - = connOpenTryDgr.desiredConnectionID - /\ chain.connectionEnd.counterpartyConnectionID - = connOpenTryDgr.counterpartyConnectionID - /\ chain.connectionEnd.clientID - = connOpenTryDgr.clientID - /\ chain.connectionEnd.counterpartyClientID - = connOpenTryDgr.counterpartyClientID - \* update the connection end in the chain store - THEN LET connOpenTryConnectionEnd == [ - state |-> "TRYOPEN", - connectionID |-> connOpenTryDgr.desiredConnectionID, - counterpartyConnectionID |-> connOpenTryDgr.counterpartyConnectionID, - clientID |-> connOpenTryDgr.clientID, - counterpartyClientID |-> connOpenTryDgr.counterpartyClientID, - versions |-> PickVersion(versionIntersection), - channelEnd |-> chain.connectionEnd.channelEnd - ] IN - LET connOpenTryChain == [ - chain EXCEPT !.connectionEnd = connOpenTryConnectionEnd - ] IN - - connOpenTryChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ConnOpenAck" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenAck(chainID, chain, datagrams) == - \* get existing connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ConnOpenAck" datagrams, with a valid connection ID and valid height - LET connOpenAckDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenAck" - /\ dgr.connectionID = connectionEnd.connectionID - /\ dgr.consensusHeight <= chain.height - /\ 
dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ConnOpenAck" datagrams, update the connection end - IF connOpenAckDgrs /= {} - THEN LET connOpenAckDgr == CHOOSE dgr \in connOpenAckDgrs : TRUE IN - \* if the connection end on the chain is in "INIT" and the version set - \* from the datagram is a subset of the supported versions in the connection end - IF \/ /\ connectionEnd.state = "INIT" - /\ connOpenAckDgr.versions \subseteq connectionEnd.versions - \* or the connection end is in "TRYOPEN" and the version set - \* from the datagram is equal to the version set in the connection end - \/ /\ connectionEnd.state = "TRYOPEN" - /\ connOpenAckDgr.versions = connectionEnd.versions - \* update the connection end - THEN LET connOpenAckConnectionEnd == [ - connectionEnd EXCEPT !.state = "OPEN", - !.versions = connOpenAckDgr.versions - ] IN - LET connOpenAckChain == [ - chain EXCEPT !.connectionEnd = connOpenAckConnectionEnd - ] IN - - connOpenAckChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - - - -\* Handle "ConnOpenConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleConnOpenConfirm(chainID, chain, datagrams) == - \* get existing connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ConnOpenConfirm" datagrams, with a valid connection ID and valid height - LET connOpenConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ConnOpenConfirm" - /\ dgr.connectionID = connectionEnd.connectionID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - IF connOpenConfirmDgrs /= {} - \* if there are valid "connOpenConfirmDgrs" datagrams, update the connection end - THEN IF connectionEnd.state = "TRYOPEN" - \* if the connection end on the chain is in "TRYOPEN", update the connection end - THEN LET connOpenConfirmDgr == CHOOSE dgr \in connOpenConfirmDgrs : TRUE IN - LET connOpenConfirmConnectionEnd == [ - connectionEnd EXCEPT !.state = "OPEN" - ] IN - LET 
connOpenConfirmChain == [ - chain EXCEPT !.connectionEnd = connOpenConfirmConnectionEnd - ] IN - - connOpenConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:53 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:09:26 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla b/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla deleted file mode 100644 index c659d61f67..0000000000 --- a/docs/spec/tla/ibc-core/ICS04ChannelHandlers.tla +++ /dev/null @@ -1,275 +0,0 @@ ------------------------- MODULE ICS04ChannelHandlers ----------------------- - -(*************************************************************************** - This module contains definitions of operators that are used to handle - channel handshake datagrams. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, IBCCoreDefinitions - -(*************************************************************************** - Channel datagram handlers - ***************************************************************************) - -\* Handle "ChanOpenInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenInit(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ChanOpenInit" datagrams, with a valid port and channel ID - LET chanOpenInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenInit" - /\ dgr.portID = GetPortID(chainID) - /\ dgr.channelID = GetChannelID(chainID)} IN - - \* if there are valid "ChanOpenInit" datagrams and the connection is not "UNINIT", - \* initialize the channel end and update the chain - IF /\ chanOpenInitDgrs /= {} - /\ connectionEnd.state /= "UNINIT" - /\ connectionEnd.channelEnd.state = "UNINIT" - THEN LET 
chanOpenInitDgr == CHOOSE dgr \in chanOpenInitDgrs : TRUE IN - LET chanOpenInitChannelEnd == - IF connectionEnd.channelEnd.order = "ORDERED" - THEN [ - state |-> "INIT", - order |-> "ORDERED", - nextSendSeq |-> 1, - nextRcvSeq |-> 1, - nextAckSeq |-> 1, - portID |-> chanOpenInitDgr.portID, - channelID |-> chanOpenInitDgr.channelID, - counterpartyPortID |-> chanOpenInitDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenInitDgr.counterpartyChannelID - ] - ELSE [ - state |-> "INIT", - order |-> "UNORDERED", - portID |-> chanOpenInitDgr.portID, - channelID |-> chanOpenInitDgr.channelID, - counterpartyPortID |-> chanOpenInitDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenInitDgr.counterpartyChannelID - ] IN - LET chanOpenInitConnectionEnd == [ - chain.connectionEnd EXCEPT !.channelEnd = chanOpenInitChannelEnd - ] IN - LET chanOpenInitChain == [ - chain EXCEPT !.connectionEnd = chanOpenInitConnectionEnd - ] IN - - chanOpenInitChain - - \* otherwise, do not update the chain store - ELSE chain - -\* Handle "ChanOpenTry" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenTry(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get "ChanOpenTry" datagrams, with a valid port and channel ID - LET chanOpenTryDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenTry" - /\ dgr.portID = GetPortID(chainID) - /\ dgr.channelID = GetChannelID(chainID) - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenTry" datagrams and the connection is "OPEN", - \* update the channel end - IF /\ chanOpenTryDgrs /= {} - /\ chain.connectionEnd.state = "OPEN" - THEN LET chanOpenTryDgr == CHOOSE dgr \in chanOpenTryDgrs : TRUE IN - \* if the channel end is uninitialized - IF \/ connectionEnd.channelEnd.state = "UNINIT" - \* of if it is initialized, and all fields match the datagram fields - \/ /\ connectionEnd.channelEnd.state = "INIT" - /\ 
connectionEnd.channelEnd.counterpartyPortID - = chanOpenTryDgr.counterpartyPortID - /\ connectionEnd.channelEnd.counterpartyChannelID - = chanOpenTryDgr.counterpartyChannelID - \* update the channel end in the chain store - THEN LET chanOpenTryChannelEnd == - IF connectionEnd.channelEnd.order = "ORDERED" - THEN [ - state |-> "TRYOPEN", - order |-> "ORDERED", - nextSendSeq |-> 1, - nextRcvSeq |-> 1, - nextAckSeq |-> 1, - portID |-> chanOpenTryDgr.portID, - channelID |-> chanOpenTryDgr.channelID, - counterpartyPortID |-> chanOpenTryDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenTryDgr.counterpartyChannelID - ] - ELSE [ - state |-> "TRYOPEN", - order |-> "UNORDERED", - portID |-> chanOpenTryDgr.portID, - channelID |-> chanOpenTryDgr.channelID, - counterpartyPortID |-> chanOpenTryDgr.counterpartyPortID, - counterpartyChannelID |-> chanOpenTryDgr.counterpartyChannelID - ] IN - - LET chanOpenTryConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenTryChannelEnd - ] IN - - LET chanOpenTryChain == [ - chain EXCEPT !.connectionEnd = chanOpenTryConnectionEnd - ] IN - - chanOpenTryChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ChanOpenAck" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenAck(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanOpenAck" datagrams, with a valid channel ID - LET chanOpenAckDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenAck" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenAck" datagrams, update the channel end - IF /\ chanOpenAckDgrs /= {} - /\ connectionEnd.state = "OPEN" - THEN \* if the channel end on the chain is in "INIT" or it is in "TRYOPEN", - \* update the 
channel end - IF \/ channelEnd.state = "INIT" - \/ channelEnd.state = "TRYOPEN" - THEN LET chanOpenAckDgr == CHOOSE dgr \in chanOpenAckDgrs : TRUE IN - LET chanOpenAckChannelEnd == [ - channelEnd EXCEPT !.state = "OPEN" - ] IN - LET chanOpenAckConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenAckChannelEnd - ] IN - LET chanOpenAckChain == [ - chain EXCEPT !.connectionEnd = chanOpenAckConnectionEnd - ] IN - - chanOpenAckChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - - -\* Handle "ChanOpenConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanOpenConfirm(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanOpenConfirm" datagrams, with a valid channel ID - LET chanOpenConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanOpenConfirm" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanOpenConfirm" datagrams, update the channel end - IF /\ chanOpenConfirmDgrs /= {} - /\ connectionEnd.state = "OPEN" - THEN \* if the channel end on the chain is in "TRYOPEN", update the channel end - IF channelEnd.state = "TRYOPEN" - THEN LET chanOpenConfirmDgr == CHOOSE dgr \in chanOpenConfirmDgrs : TRUE IN - LET chanOpenConfirmChannelEnd == [ - channelEnd EXCEPT !.state = "OPEN" - ] IN - LET chanOpenConfirmConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanOpenConfirmChannelEnd - ] IN - LET chanOpenConfirmChain == [ - chain EXCEPT !.connectionEnd = chanOpenConfirmConnectionEnd - ] IN - - chanOpenConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* Handle "ChanCloseInit" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanCloseInit(chainID, chain, datagrams) == - 
\* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanCloseInit" datagrams, with a valid channel ID - LET chanCloseInitDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanCloseInit" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID} IN - - \* if there are valid "ChanCloseInit" datagrams - IF /\ chanCloseInitDgrs /= {} - \* and the channel end is neither UNINIT nor CLOSED - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* and the connection end is OPEN - /\ connectionEnd.state = "OPEN" - THEN \* then close the channel end - LET chanCloseInitChannelEnd == [ - channelEnd EXCEPT !.state = "CLOSED" - ] IN - LET chanCloseInitConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanCloseInitChannelEnd - ] IN - LET chanCloseInitChain == [ - chain EXCEPT !.connectionEnd = chanCloseInitConnectionEnd - ] IN - - chanCloseInitChain - - \* otherwise, do not update the chain store - ELSE chain - -\* Handle "ChanCloseConfirm" datagrams -\* @type: (Str, CHAINSTORE, Set(DATAGRAM)) => CHAINSTORE; -HandleChanCloseConfirm(chainID, chain, datagrams) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get "ChanCloseConfirm" datagrams, with a valid channel ID - LET chanCloseConfirmDgrs == {dgr \in datagrams : - /\ dgr.type = "ChanCloseConfirm" - /\ dgr.portID = channelEnd.portID - /\ dgr.channelID = channelEnd.channelID - /\ dgr.proofHeight \in chain.counterpartyClientHeights} IN - - \* if there are valid "ChanCloseConfirm" datagrams - IF /\ chanCloseConfirmDgrs /= {} - \* and the channel end is neither UNINIT nor CLOSED - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* and the connection end is OPEN - /\ connectionEnd.state = "OPEN" - THEN \* then close the channel end - LET chanCloseConfirmChannelEnd == [ - channelEnd 
EXCEPT !.state = "CLOSED" - ] IN - LET chanCloseConfirmConnectionEnd == [ - connectionEnd EXCEPT !.channelEnd = chanCloseConfirmChannelEnd - ] IN - LET chanCloseConfirmChain == [ - chain EXCEPT !.connectionEnd = chanCloseConfirmConnectionEnd - ] IN - - chanCloseConfirmChain - - \* otherwise, do not update the chain store - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:44 CEST 2021 by ilinastoilkovska -\* Created Tue Apr 07 16:58:02 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla b/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla deleted file mode 100644 index 4e4808f040..0000000000 --- a/docs/spec/tla/ibc-core/ICS04PacketHandlers.tla +++ /dev/null @@ -1,368 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -(*************************************************************************** - This module contains definitions of operators that are used to handle - packet datagrams. 
- ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -\* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -HandlePacketRecv(chainID, chain, packetDatagram, log) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == connectionEnd.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - /\ connectionEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - \* if "PacketRecv" datagram can be verified - /\ packetDatagram.proofHeight \in chain.counterpartyClientHeights - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* if the channel is unordered and the packet has not been received - IF /\ channelEnd.order = "UNORDERED" - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN LET newChainStore == [chain EXCEPT - \* record that the packet has been 
received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - - [chainStore |-> newChainStore, packetLog |-> Append(log, logEntry)] - - ELSE \* if the channel is ordered and the packet sequence is nextRcvSeq - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextRcvSeq - THEN LET newChainStore == [chain EXCEPT - \* increase the nextRcvSeq - !.connectionEnd.channelEnd.nextRcvSeq = - chain.connectionEnd.channelEnd.nextRcvSeq + 1, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - - [chainStore |-> newChainStore, packetLog |-> Append(log, logEntry)] - - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log] - ELSE [chainStore |-> chain, packetLog |-> log] - - -\* Handle "PacketAck" datagrams -\* @type: (Str, CHAINSTORE, DATAGRAM, Seq(LOGENTRY)) => [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY)]; -HandlePacketAck(chainID, chain, packetDatagram, log) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get packet - LET packet == packetDatagram.packet IN - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - - IF \* if the channel and connection ends are open for packet transmission - /\ channelEnd.state = "OPEN" - /\ connectionEnd.state = "OPEN" - \* if the packet commitment exists in the chain store - /\ packetCommitment \in 
chain.packetCommitments - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* if the "PacketAck" datagram can be verified - /\ packetDatagram.proofHeight \in chain.counterpartyClientHeights - THEN \* if the channel is ordered and the packet sequence is nextAckSeq - LET newChainStore == - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextAckSeq - THEN \* increase the nextAckSeq and remove packet commitment - [chain EXCEPT - !.connectionEnd.channelEnd.nextAckSeq = - chain.connectionEnd.channelEnd.nextAckSeq + 1, - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* if the channel is unordered, remove packet commitment - ELSE IF channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* otherwise, do not update the chain store - ELSE chain IN - - [chainStore |-> newChainStore, packetLog |-> log] - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log] - - -\* write packet committments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get chainID's connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get chainID's channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get latest counterparty client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* connection end is initialized - /\ connectionEnd.state /= "UNINIT" - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = 
channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet committment in the chain store, and increase nextSendSeq - /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextSendSeq - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.connectionEnd.channelEnd.nextSendSeq = channelEnd.nextSendSeq + 1 - ] - \* otherwise, do not update the chain store - ELSE IF \* if the channel is unordered, - \* add a packet committment in the chain store - /\ channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]} - ] - \* otherwise, do not update the chain store - ELSE chain - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WriteAcknowledgement(chain, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - - \* remove the packet 
from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge)] - -\* log acknowledgements to packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKET) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == [ - type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight, - acknowledgement |-> TRUE - ] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - - -\* check if a packet timed out -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == - \* get connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get counterparty channel end - LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID match channel and port IDs - /\ 
packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* counterparty channel end has dstPortID and dstChannelID - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedConnectionEnd == [connectionEnd EXCEPT - !.channelEnd = updatedChannelEnd] IN - LET updatedChainStore == [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}, - !.connectionEnd = updatedConnectionEnd] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -\* check if a packet timed out on close -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == - \* get connection end - LET connectionEnd == GetConnectionEnd(chain) IN - \* get channel end - LET channelEnd == GetChannelEnd(chain) IN - \* get counterparty channel end - LET counterpartyChannelEnd == GetChannelEnd(counterpartyChain) IN - - \* get packet committment that should be in chain store - LET packetCommitment == [ - portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> 
packet.sequence, - timeoutHeight |-> packet.timeoutHeight - ] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] IN - - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPort = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedConnectionEnd == [connectionEnd EXCEPT - !.channelEnd = updatedChannelEnd] IN - LET updatedChainStore == [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}, - !.connectionEnd = updatedConnectionEnd] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - - -============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:22:40 CEST 2021 by 
ilinastoilkovska -\* Created Wed Jul 29 14:30:04 CEST 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/ICS18Relayer.tla b/docs/spec/tla/ibc-core/ICS18Relayer.tla deleted file mode 100644 index b3d9e8cf40..0000000000 --- a/docs/spec/tla/ibc-core/ICS18Relayer.tla +++ /dev/null @@ -1,448 +0,0 @@ ---------------------------- MODULE ICS18Relayer ---------------------------- - -(*************************************************************************** - This module contains the specification of a relayer, which is an off-chain - process running a relayer algorithm. - ***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCCoreDefinitions - -CONSTANTS GenerateClientDatagrams, \* toggle generation of client datagrams - GenerateConnectionDatagrams, \* toggle generation of connection datagrams - GenerateChannelDatagrams, \* toggle generation of channel datagrams - GeneratePacketDatagrams \* toggle generation of packet datagrams - -ASSUME /\ GenerateClientDatagrams \in BOOLEAN - /\ GenerateConnectionDatagrams \in BOOLEAN - /\ GenerateChannelDatagrams \in BOOLEAN - /\ GeneratePacketDatagrams \in BOOLEAN - -CONSTANTS - MaxHeight, \* set of possible heights of the chains in the system - MaxVersion, \* maximal connection / channel version (we assume versions are integers) - MaxPacketSeq \* maximal packet sequence number - -VARIABLES - chainAstore, \* store of ChainA - chainBstore, \* store of ChainB - outgoingDatagrams, \* a function that assigns a set of pending datagrams - \* outgoing from the relayer to each chainID - outgoingPacketDatagrams, \* a dedicated datagrams channel for packet datagrams - relayerHeights, \* a function that assigns a height to each chainID - closeChannelA, \* flag that triggers closing of the channel end at ChainA - closeChannelB, \* flag that triggers closing of the channel end at ChainB - packetLog \* packet log - -vars == <> -Heights == 1..MaxHeight \* set of possible 
heights of the chains in the system -Versions == 1..MaxVersion \* set of possible connection versions - -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* @type: (Str) => Bool; -GetCloseChannelFlag(chainID) == - IF chainID = "chainA" - THEN closeChannelA - ELSE closeChannelB - -(*************************************************************************** - Client datagrams - ***************************************************************************) -\* Compute client datagrams designated for dstChainID. -\* These are used to update the client for srcChainID on dstChainID. -\* Some client updates might trigger an update of the height that -\* the relayer stores for srcChainID -\* @type: (Str, Str, Str -> Int) => [datagrams: Set(DATAGRAM), relayerUpdate: Str -> Int]; -ClientDatagrams(srcChainID, dstChainID, relayer) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - LET srcChainHeight == srcChain.height IN - LET srcClientHeight == GetMaxCounterpartyClientHeight(dstChain) IN - LET srcClientID == GetClientID(srcChainID) IN - - LET emptySetDatagrams == {} IN - - \* check if the relayer chain height for srcChainID should be updated - LET srcRelayerChainHeight == - IF relayer[srcChainID] < srcChainHeight - THEN srcChainHeight - ELSE relayer[srcChainID] IN - - \* create an updated relayer - LET updatedRelayer == - [relayer EXCEPT ![srcChainID] = srcRelayerChainHeight] IN - - \* generate datagrams for dstChainID - LET dstDatagrams == - IF srcClientHeight = nullHeight - THEN \* the src client does not exist on dstChainID - {[ - type |-> "ClientCreate", - height |-> srcChainHeight, - clientID |-> srcClientID - ]} - ELSE \* the src client exists on dstChainID - IF srcClientHeight < srcChainHeight - THEN \* the height of the src client on dstChainID is smaller than the height of the src chain - {[ - type |-> "ClientUpdate", - height |-> srcChainHeight, - 
clientID |-> srcClientID - ]} - ELSE emptySetDatagrams IN - - [datagrams|-> dstDatagrams, relayerUpdate |-> updatedRelayer] - -(*************************************************************************** - Connection datagrams - ***************************************************************************) -\* Compute connection datagrams designated for dstChainID. -\* These are used to update the connection end on dstChainID. -ConnectionDatagrams(srcChainID, dstChainID) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - - LET srcConnectionEnd == srcChain.connectionEnd IN - LET dstConnectionEnd == dstChain.connectionEnd IN - - LET srcConnectionID == GetConnectionID(srcChainID) IN - LET dstConnectionID == GetConnectionID(dstChainID) IN - - LET srcHeight == srcChain.height IN - LET srcClientHeight == GetMaxCounterpartyClientHeight(srcChain) IN - - LET emptySetDatagrams == {} IN - - LET dstDatagrams == - IF dstConnectionEnd.state = "UNINIT" /\ srcConnectionEnd.state = "UNINIT" THEN - {[ - type |-> "ConnOpenInit", - connectionID |-> dstConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - clientID |-> GetCounterpartyClientID(dstChainID), \* "clA" - counterpartyConnectionID |-> srcConnectionID, \* "connAtoB" - counterpartyClientID |-> GetCounterpartyClientID(srcChainID) \* "clB" - ]} - - ELSE IF /\ srcClientHeight /= nullHeight - /\ srcConnectionEnd.state = "INIT" /\ \/ dstConnectionEnd.state = "UNINIT" - \/ dstConnectionEnd.state = "INIT" THEN - {[ - type |-> "ConnOpenTry", - desiredConnectionID |-> srcConnectionEnd.counterpartyConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - counterpartyConnectionID |-> srcConnectionEnd.connectionID, \* "connAtoB" - clientID |-> srcConnectionEnd.counterpartyClientID, \* "clA" - counterpartyClientID |-> srcConnectionEnd.clientID, \* "clB" - versions |-> srcConnectionEnd.versions, - proofHeight |-> srcHeight, - consensusHeight |-> 
srcClientHeight - ]} - - ELSE IF /\ srcClientHeight /= nullHeight - /\ srcConnectionEnd.state = "TRYOPEN" /\ \/ dstConnectionEnd.state = "INIT" - \/ dstConnectionEnd.state = "TRYOPEN" THEN - {[ - type |-> "ConnOpenAck", - connectionID |-> dstConnectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - versions |-> srcConnectionEnd.versions, - proofHeight |-> srcHeight, - consensusHeight |-> srcClientHeight - ]} - - ELSE IF srcConnectionEnd.state = "OPEN" /\ dstConnectionEnd.state = "TRYOPEN" THEN - {[ - type |-> "ConnOpenConfirm", - connectionID |-> dstConnectionEnd.connectionID, \* "connBtoA" (if srcChainID = "chainA", dstChainID = "chainB") - proofHeight |-> srcHeight - ]} - ELSE emptySetDatagrams IN - - dstDatagrams - -(*************************************************************************** - Channel handshake datagrams - ***************************************************************************) -\* Compute channel datagrams designated for dstChainID. -\* These are used to update the channel end on dstChainID. 
-ChannelDatagrams(srcChainID, dstChainID) == - LET srcChain == GetChainByID(srcChainID) IN - LET dstChain == GetChainByID(dstChainID) IN - - LET srcChannelEnd == GetChannelEnd(srcChain) IN - LET dstChannelEnd == GetChannelEnd(dstChain) IN - - LET srcPortID == GetPortID(srcChainID) IN - LET dstPortID == GetPortID(dstChainID) IN - - LET srcChannelID == GetChannelID(srcChainID) IN - LET dstChannelID == GetChannelID(dstChainID) IN - - LET srcHeight == srcChain.height IN - - LET emptySetDatagrams == {} IN - - LET dstDatagrams == - IF dstChannelEnd.state = "UNINIT" /\ srcChannelEnd.state = "UNINIT" THEN - {[ - type |-> "ChanOpenInit", - portID |-> dstPortID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelID, \* "chanBtoA" - counterpartyPortID |-> srcPortID, \* "portA" - counterpartyChannelID |-> srcChannelID \* "chanAtoB" - ]} - - ELSE IF srcChannelEnd.state = "INIT" /\ \/ dstChannelEnd.state = "UNINIT" - \/ dstChannelEnd.state = "INIT" THEN - {[ - type |-> "ChanOpenTry", - portID |-> dstPortID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelID, \* "chanBtoA" - counterpartyPortID |-> srcPortID, \* "portA" - counterpartyChannelID |-> srcChannelID, \* "chanAtoB" - proofHeight |-> srcHeight - ]} - - ELSE IF srcChannelEnd.state = "TRYOPEN" /\ \/ dstChannelEnd.state = "INIT" - \/ dstChannelEnd.state = "TRYOPEN" THEN - {[ - type |-> "ChanOpenAck", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - ELSE IF srcChannelEnd.state = "OPEN" /\ dstChannelEnd.state = "TRYOPEN" THEN - {[ - type |-> "ChanOpenConfirm", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - \* channel closing datagrams creation only for open channels - ELSE IF 
dstChannelEnd.state = "OPEN" /\ GetCloseChannelFlag(dstChannelID) THEN - {[ - type |-> "ChanCloseInit", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID \* "chanBtoA" - ]} - - ELSE IF /\ srcChannelEnd.state = "CLOSED" - /\ dstChannelEnd.state /= "CLOSED" - /\ dstChannelEnd.state /= "UNINIT" THEN - {[ - type |-> "ChanCloseConfirm", - portID |-> dstChannelEnd.portID, \* "portB" (if srcChainID = "chainA", dstChainID = "chainB") - channelID |-> dstChannelEnd.channelID, \* "chanBtoA" - proofHeight |-> srcHeight - ]} - - ELSE emptySetDatagrams IN - - dstDatagrams - -(*************************************************************************** - Packet datagrams - ***************************************************************************) -\* Compute a packet datagram based on the packetLogEntry -\* @type: (LOGENTRY) => DATAGRAM; -PacketDatagram(packetLogEntry) == - \* get chainID and its channel end - LET chainID == packetLogEntry.srcChainID IN - LET channelEnd == GetChainByID(chainID).connectionEnd.channelEnd IN - - \* get portID and counterpartyPortID - LET portID == channelEnd.portID IN \* "portA" (if srcChainID = "chainA") - LET counterpartyPortID == channelEnd.counterpartyPortID IN \* "portB" (if srcChainID = "chainA") - - \* get channelID and counterpartyChannelID - LET channelID == channelEnd.channelID IN \* "chanAtoB" (if srcChainID = "chainA") - LET counterpartyChannelID == channelEnd.counterpartyChannelID IN \* "chanBtoA" (if srcChainID = "chainA") - - LET srcHeight == GetChainByID(chainID).height IN - - \* the srcChannelID of the packet that is received is channelID, - \* the dstChannelID of the packet that is received is counterpartyChannelID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcPortID |-> portID, - srcChannelID |-> channelID, - dstPortID |-> counterpartyPortID, - dstChannelID |-> 
counterpartyChannelID - ] IN - - \* the srcChannelID of the packet that is acknowledged is counterpartyChannelID, - \* the dstChannelID of the packet that is acknowledged is channelID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcPortID |-> counterpartyPortID, - srcChannelID |-> counterpartyChannelID, - dstPortID |-> portID, - dstChannelID |-> channelID - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -(*************************************************************************** - Compute client, connection, channel datagrams (from srcChainID to dstChainID) - ***************************************************************************) -\* Currently supporting: -\* - ICS 02: Client updates -\* - ICS 03: Connection handshake -\* - ICS 04: Channel handshake -ComputeDatagrams(srcChainID, dstChainID) == - \* ICS 02 : Clients - \* - Determine if light clients needs to be updated - LET clientDatagrams == - IF GenerateClientDatagrams - THEN ClientDatagrams(srcChainID, dstChainID, relayerHeights) - ELSE [datagrams |-> {}, relayerUpdate |-> relayerHeights] IN - - \* ICS 03 : Connections - \* - Determine if any connection handshakes are in progress - LET connectionDatagrams == - IF GenerateConnectionDatagrams - THEN ConnectionDatagrams(srcChainID, dstChainID) - ELSE {} IN - - \* ICS 04 : Channels & Packets - \* - Determine if any channel handshakes are in progress - LET channelDatagrams == - IF GenerateChannelDatagrams - THEN ChannelDatagrams(srcChainID, dstChainID) - ELSE {} IN - - [datagrams |-> clientDatagrams.datagrams \union - connectionDatagrams \union - channelDatagrams, - relayerUpdate |-> 
clientDatagrams.relayerUpdate] - -(*************************************************************************** - Relayer actions - ***************************************************************************) -\* Update the height of the relayer client for some chainID -UpdateRelayerClientHeight(chainID) == - LET chainLatestHeight == GetChainByID(chainID).height IN - /\ relayerHeights[chainID] < chainLatestHeight - /\ relayerHeights' = [relayerHeights EXCEPT - ![chainID] = GetChainByID(chainID).height - ] - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* for two chains, srcChainID and dstChainID, where srcChainID /= dstChainID, -\* create the pending datagrams and update the corresponding sets of pending datagrams -Relay(srcChainID, dstChainID) == - LET datagramsAndRelayerUpdate == ComputeDatagrams(srcChainID, dstChainID) IN - /\ srcChainID /= dstChainID - /\ outgoingDatagrams' = - [outgoingDatagrams EXCEPT - ![dstChainID] = outgoingDatagrams[dstChainID] - \union - datagramsAndRelayerUpdate.datagrams - ] - /\ relayerHeights' = datagramsAndRelayerUpdate.relayerUpdate - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* given an entry from the packet log, create a packet datagram and -\* append it to the outgoing packet datagram queue for dstChainID -RelayPacketDatagram(srcChainID, dstChainID) == - /\ packetLog /= <<>> - /\ GeneratePacketDatagrams - /\ LET packetLogEntry == Head(packetLog) IN - LET packetDatagram == PacketDatagram(packetLogEntry) IN - \* if srcChainID matches the one from the log entry - /\ packetLogEntry.srcChainID = srcChainID - \* if dstChainID is the counterparty chain of srcChainID - /\ dstChainID = GetCounterpartyChainID(packetLogEntry.srcChainID) - /\ packetDatagram /= NullDatagram - /\ outgoingPacketDatagrams' = [outgoingPacketDatagrams EXCEPT - ![dstChainID] = Append(outgoingPacketDatagrams[dstChainID], - packetDatagram)] - /\ packetLog' = Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* update the relayer client heights -UpdateClient == - \E chainID 
\in ChainIDs : UpdateRelayerClientHeight(chainID) - -\* create client, connection, channel datagrams -CreateDatagrams == - \E srcChainID \in ChainIDs : \E dstChainID \in ChainIDs : - \* relay client, connection, channel datagrams - Relay(srcChainID, dstChainID) - -\* create packet datagrams -CreatePacketDatagrams == - \E srcChainID \in ChainIDs : \E dstChainID \in ChainIDs : - \* relay packet datagrams - RelayPacketDatagram(srcChainID, dstChainID) - - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially: -\* - the relayer heights are uninitialized (i.e., their height is nullHeight) -\* - there are no datagrams -Init == - /\ relayerHeights = [chainID \in ChainIDs |-> nullHeight] - /\ outgoingDatagrams = [chainID \in ChainIDs |-> {}] - /\ outgoingPacketDatagrams = [chainID \in ChainIDs |-> <<>>] - -\* Next state action -\* The relayer either: -\* - updates its clients, or -\* - creates datagrams, or -\* - does nothing -Next == - \/ UpdateClient - \/ CreateDatagrams - \/ CreatePacketDatagrams - \/ UNCHANGED vars - -\* Fairness constraints -Fairness == - /\ \A chainID \in ChainIDs : - WF_vars(UpdateRelayerClientHeight(chainID)) - /\ \A srcChainID \in ChainIDs : \A dstChainID \in ChainIDs : - WF_vars(Relay(srcChainID, dstChainID)) - /\ \A srcChainID \in ChainIDs : \A dstChainID \in ChainIDs : - WF_vars(RelayPacketDatagram(srcChainID, dstChainID)) - - - -(*************************************************************************** - Invariants - ***************************************************************************) -\* Type invariant -TypeOK == - /\ relayerHeights \in [ChainIDs -> Heights \union {nullHeight}] - /\ outgoingDatagrams \in [ChainIDs -> SUBSET Datagrams(Heights, MaxPacketSeq, Versions)] - /\ outgoingPacketDatagrams \in [ChainIDs -> Seq(Datagrams(Heights, MaxPacketSeq, Versions))] - 
-============================================================================= -\* Modification History -\* Last modified Mon Apr 12 14:30:40 CEST 2021 by ilinastoilkovska -\* Created Fri Mar 06 09:23:12 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/ibc-core/MC_IBCCore.tla b/docs/spec/tla/ibc-core/MC_IBCCore.tla deleted file mode 100644 index d9edd8e323..0000000000 --- a/docs/spec/tla/ibc-core/MC_IBCCore.tla +++ /dev/null @@ -1,53 +0,0 @@ ------------------------------ MODULE MC_IBCCore ----------------------------- - -MaxHeight == 2 -MaxVersion == 2 -MaxPacketSeq == 1 -ClientDatagramsRelayer1 == TRUE -ClientDatagramsRelayer2 == FALSE -ConnectionDatagramsRelayer1 == TRUE -ConnectionDatagramsRelayer2 == FALSE -ChannelDatagramsRelayer1 == TRUE -ChannelDatagramsRelayer2 == FALSE -PacketDatagramsRelayer1 == TRUE -PacketDatagramsRelayer2 == FALSE -ChannelOrdering == "UNORDERED" - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* chain store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* chain store of ChainB - \* @type: Set(DATAGRAM); - incomingDatagramsChainA, \* set of (client, connection, channel) datagrams incoming to ChainA - \* @type: Set(DATAGRAM); - incomingDatagramsChainB, \* set of (client, connection, channel) datagrams incoming to ChainB - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - incomingPacketDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Int; - relayer1Heights, \* the client heights of Relayer1 - \* @type: Str -> Int; - relayer2Heights, \* the client heights of Relayer2 - \* @type: Str -> Set(DATAGRAM); - outgoingDatagrams, \* sets of (client, connection, channel) datagrams outgoing of the relayers - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* sequences of packet datagrams outgoing of the relayers - \* @type: Bool; - closeChannelA, \* flag that triggers closing of the channel end at 
ChainA - \* @type: Bool; - closeChannelB, \* flag that triggers closing of the channel end at ChainB - \* @type: HISTORY; - historyChainA, \* history variables for ChainA - \* @type: HISTORY; - historyChainB, \* history variables for ChainB - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB \* packet sequence number from the application on ChainB - -INSTANCE IBCCore -============================================================================= \ No newline at end of file diff --git a/docs/spec/tla/ibc-core/README.md b/docs/spec/tla/ibc-core/README.md deleted file mode 100644 index 388dc4711c..0000000000 --- a/docs/spec/tla/ibc-core/README.md +++ /dev/null @@ -1,221 +0,0 @@ -# TLA+ specification of the IBC Core protocols - -A TLA+ specification of the IBC Core protocols ([ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics), [ICS03](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics), [ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics), [ICS18](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms)). -In particular, the main module is [IBCCore.tla](IBCCore.tla) and models a -system consisting of two chains and two relayers. -The model allows to express concurrency aspects of a system with multiple (correct) relayers. -The specification is written in a modular way, in order to facilitate future -formal verification of properties and invariants in an adversarial setting. 
- -## Modules - -The specification has the following modules: - - `IBCCore.tla` (the main module) - - `ICS18Relayer.tla` - - `Chain.tla` - - `ICS02ClientHandlers.tla` - - `ICS03ConnectionHandlers.tla` - - `ICS04ChannelHandlers.tla` - - `ICS04PacketHandlers.tla` - - `IBCCoreDefinitions.tla` - -### [`ICS18Relayer.tla`](ICS18Relayer.tla) -A relayer relays datagrams between the two chains. Its transition relation is defined by the formula: -```tla -Next == - \/ UpdateClient - \/ CreateDatagrams - \/ UNCHANGED vars -``` -where `UpdateClient` and `CreateDatagrams` are scheduled non-deterministically. -`UpdateClient` picks a light client on the relayer for some chain and updates it. `CreateDatagrams` picks a direction (a pair of source and destination chain) and -creates client, connection, channel, and packet datagrams (i.e., it captures the -logic of [`pendingDatagrams()`](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-018-relayer-algorithms#pending-datagrams)). - -### [`Chain.tla`](Chain.tla) -The chain state is represented by a chain store, which is a snapshot of the provable and private stores, to the extent necessary for IBC. Additionally, a chain has dedicated -datagram containers for: -1. client, connection, and channel datagrams (given by a set of datagrams), -2. packet datagrams (given by a queue of datagrams that models the order in which the datagrams were submitted by the relayer). - -Its transition relation is defined by the formula: -```tla -Next == - \/ AdvanceChain - \/ HandleIncomingDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars -``` -where: -- `AdvanceChain`: increments the height of the chain, -- `HandleIncomingDatagrams`: dispatches the datagrams to the appropriate handlers. -This captures the logic of the [routing module](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-026-routing-module). 
-- `SendPacket`: models user/application-defined calls to send a packet. As this specification does not have a specific application in mind, we abstract away from the packet data, and allow sending packets non-deterministically. -The packet commitment is written in the chain store, and the sent packet is logged, -which triggers the relayer to create a `PacketRecv` datagram. -- `AcknowledgePacket`: writes an acknowledgement for a received packet - in the chain store and on the packet log, which triggers the relayer to create a `PacketAck` datagram. - -### [`ICS02ClientHandlers.tla`](ICS02ClientHandlers.tla), [`ICS03ConnectionHandlers.tla`](ICS03ConnectionHandlers.tla), [`ICS04ChannelHandlers.tla`](ICS04ChannelHandlers.tla), [`ICS04PacketHandlers.tla`](ICS04PacketHandlers.tla) -These TLA+ modules contain definitions of -operators that handle client, connection handshake, channel handshake, and packet -datagrams, respectively. -These operators capture the logic of the handlers defined in [ICS02](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-002-client-semantics), [ICS03](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics), and -[ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics). - - - - - -The module `Relayer.tla` contains the specification of the relayer algorithm. -The module `Chain.tla` captures the chain logic. -It extends the modules `ClientHandlers.tla`, -`ConnectionHandlers.tla`, `ChannelHandlers.tla`, and -`PacketHandlers.tla`, which contain definition of -operators that handle client, connection handshake, channel handshake, and packet -datagrams, respectively. -The module `RelayerDefinitions.tla` contains definition of operators that are used across all the -modules. 
- -## Properties and Invariants - -### System-level properties - -We specify three kinds of properties for the IBC core protocols in the module [IBCCore.tla](IBCCore.tla): - -- `IBCSafety`: Bad datagrams are not used to update the chain stores. - -- `IBCDelivery`: If `ChainA` sends a datagram to `ChainB`, then `ChainB` eventually receives the datagram - - - -### Packets - -[ICS04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics) specifies the following list of ["Desired -Properties"](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#desired-properties) - -#### [Efficiency](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#efficiency) - -Efficiency seems to be too vague to formalize. In particular the -formulation ignores relayers that are the active components in packet -transmission. It is not clear what a suitable way is to formalize it. - -#### [Exactly-once delivery](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#exactly-once-delivery) - -These properties are also vague as: - -* in the absence of a relayer no packets can be delivered -* ignores timeouts -* unspecific what "sent" means. We suggest it means that a packet commitment is written in the provable store (in our model `ChainStore`) rather than executing `SendPacket`. - -As a result we suggest that the property should be decomposed into to properties: - -* (at most once) For each packer `p`, if a chain performs `RecvPacket(p)` successfully (without abort), it will - not perform `RecvPacket(p)` successfully in the future. 
- - -* (typical case) If - * sender and receiver chain are valid, and - * there is a correct relayer, and - * communication is bounded in time, and - * the `timeoutHeights` and times are luckily chosen, and - * the receiver chain does not censor the packet - - then the packet will be delivered. - - -The second property ignores that timeouts can happen. - -If this is the confirmed intended behavior, these properties can be expressed -and verified -by a slight modification of the specification, in particular, the way in which -the packet receipts are stored in the chain store (in a set vs. in a sequence). - -#### [Ordering](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#ordering) - -- ordered channels: It is not clear what "if packet x is sent before packet y by a channel end on chain A" meant in a context where chain A performs invalid transitions: then a packet with sequence number *i* can be sent after *i+1*. If this happens, the IBC implementation may be broken (depends on the relayer). - -In the context of two valid chains, this property can be -expressed and verified by adding a history -variable on the receiving side, which is modified by transitions of the receiving chain. - -- no property defined for unordered. - -#### [Permissioning](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics#permissioning) - -This property is about capabilities. We do not capture capabilities in the TLA+ specification. 
- - - -### Channel - -As there are no explicit properties regarding channels given in [ICS 04](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics) in textual form, we have formalized that the channel handshake does not deviate from the channel lifecycle provided as a [figure](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-004-channel-and-packet-semantics/channel-state-machine.png). They are given in [IBCCore.tla](IBCCore.tla) under the names - -- `ChannelInitSafety` -- `ChannelTryOpenSafety` -- `ChannelOpenSafety` -- `ChannelCloseSafety` - -### Connection Handshake - -Similar to Channel handshake, we have formalized that the connection handshake does not deviate from the channel lifecycle provided as a [figure](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics/state.png). They are given in [IBCCore.tla](IBCCore.tla) under the names - -- `ConnectionInitSafety` -- `ConnectionTryOpenSafety` -- `ConnectionOpenSafety` - - -We formalize [these properties](https://github.com/cosmos/ibc/tree/5877197dc03e844542cb8628dd52674a37ca6ff9/spec/ics-003-connection-semantics#properties--invariants) as follows: -> Connection identifiers are first-come-first-serve: once a connection has been negotiated, a unique identifier pair exists between two chains. - -[ICS3-Proto-1-ConnectionUniqueness](https://github.com/informalsystems/ibc-rs/blob/master/docs/spec/connection-handshake/L1_2.md#guarantees) A module accepts (i.e., initializes on) a connection end at most once. - -> The connection handshake cannot be man-in-the-middled by another blockchain's IBC handler. - -The scenario is not clear, so we did not formalize it. 
- - - -## Using the Model - -### Constants - -The module `IBCCore.tla` is parameterized by the constants: - - `ClientDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining if `Relayer_i` creates client datagrams, - - `ConnectionDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining if `Relayer_i` creates connection datagrams, - - `ChannelDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining if `Relayer_i` creates channel datagrams, - - `PacketDatagramsRelayer_i`, for `i in {1, 2}`, a Boolean flag defining if `Relayer_i` creates packet datagrams, - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `MaxVersion`, a natural number denoting the maximal connection / channel version supported, - - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number, - - `ChannelOrdering`, a string indicating whether the channels are ordered or unordered - -#### Assigning values to the constants - -The Boolean flags, defined as constants in the module `IBCCore.tla`, allow us to run experiments in different settings. For example, if we set both `ClientDatagramsRelayer1` and `ClientDatagramsRelayer2` to `TRUE` in a TLC model, then the two relayers in the system concurrently create datagrams related to client creation and client update, and the model checker will check the temporal properties related to client datagrams. - -Observe that the setting where, for example, `ClientDatagramsRelayer1 = TRUE`, `ConnectionDatagramsRelayer2 = TRUE`, `ChannelDatagramsRelayer1 = TRUE`, `PacketDatagramsRelayer1 = TRUE`, and the remaining boolean flags are `FALSE`, is equivalent to having a single relayer. 
- -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `IBCCore.tla` - - create a model - - assign a value to the constants (example values can be found in `IBCCore.cfg`) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - add the properties `IBCSafety` and `IBCDelivery` - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC using the constants defined in `IBCCore.cfg` and verified the invariant `TypeOK` in 14min and the invariant `IBCInv` in 11min. -As TLC usually takes longer to check safety and liveness properties, we have not -conducted extensive experiments to check `IBCSafety` and `IBCDelivery` with TLC yet. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. 
diff --git a/docs/spec/tla/packet-delay/Chain.tla b/docs/spec/tla/packet-delay/Chain.tla deleted file mode 100644 index c21ad200d0..0000000000 --- a/docs/spec/tla/packet-delay/Chain.tla +++ /dev/null @@ -1,159 +0,0 @@ -------------------------------- MODULE Chain ------------------------------- - -EXTENDS Integers, FiniteSets, Sequences, ICS04PacketHandlers, IBCPacketDelayDefinitions - -CONSTANTS - MaxHeight, \* maximal chain height - ChannelOrdering, \* indicate whether the channels are ordered or unordered - MaxPacketSeq, \* maximal packet sequence number - MaxDelay, \* maximal packet delay - ChainID \* a chain ID - -VARIABLES - chainStore, \* chain store, containing client heights and a channel end - incomingPacketDatagrams, \* sequence of incoming packet datagrams - appPacketSeq, \* packet sequence number from the application on the chain - packetLog, \* packet log - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -vars == <> -Heights == 1..MaxHeight \* set of possible heights of the chains in the system - -(*************************************************************************** - Packet update operators - ***************************************************************************) -\* Update the chain store and packet log with packet datagrams -(* @type: (Str, DATAGRAM, Seq(LOGENTRY)) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -PacketUpdate(chainID, packetDatagram, log) == - - LET packet == packetDatagram.packet IN - \* get the new updated store, packet log - LET packetUpdate == - IF packetDatagram.type = "PacketRecv" - THEN HandlePacketRecv(chainID, chainStore, packetDatagram, MaxDelay, log, packetDatagramTimestamp) - ELSE IF packetDatagram.type = "PacketAck" - THEN HandlePacketAck(chainID, chainStore, packetDatagram, MaxDelay, log, packetDatagramTimestamp) - ELSE [chainStore |-> chainStore, - packetLog |-> log, - datagramTimestamp |-> packetDatagramTimestamp] - 
IN - - LET packetUpdateStore == packetUpdate.chainStore IN - - \* update height and timestamp - LET updatedStore == - IF packetUpdateStore.height + 1 \in Heights - THEN [packetUpdateStore EXCEPT - !.height = packetUpdateStore.height + 1, - !.timestamp = packetUpdateStore.timestamp + 1] - ELSE [packetUpdateStore EXCEPT - !.timestamp = packetUpdateStore.timestamp + 1] - IN - - [chainStore |-> updatedStore, - packetLog |-> packetUpdate.packetLog, - datagramTimestamp |-> packetUpdate.datagramTimestamp] - -(*************************************************************************** - Chain actions - ***************************************************************************) -\* Advance the height of the chain until MaxHeight is reached -AdvanceChain == - /\ chainStore.height + 1 \in Heights - /\ chainStore' = [chainStore EXCEPT - !.height = chainStore.height + 1, - !.timestamp = chainStore.timestamp + 1] - /\ UNCHANGED <> - -\* handle the incoming packet datagrams -HandlePacketDatagrams == - \* enabled if incomingPacketDatagrams is not empty - /\ incomingPacketDatagrams /= <<>> - /\ LET packetUpdate == PacketUpdate(ChainID, Head(incomingPacketDatagrams), packetLog) IN - /\ chainStore' = packetUpdate.chainStore - /\ packetLog' = packetUpdate.packetLog - /\ incomingPacketDatagrams' = Tail(incomingPacketDatagrams) - /\ packetDatagramTimestamp' = packetUpdate.datagramTimestamp - /\ UNCHANGED appPacketSeq - -\* Send a packet -SendPacket == - \* enabled if appPacketSeq is not bigger than MaxPacketSeq - /\ appPacketSeq <= MaxPacketSeq - \* Create packet - /\ LET packet == [ - sequence |-> appPacketSeq, - timeoutHeight |-> MaxHeight, - srcPortID |-> chainStore.channelEnd.portID, - srcChannelID |-> chainStore.channelEnd.channelID, - dstPortID |-> chainStore.channelEnd.counterpartyPortID, - dstChannelID |-> chainStore.channelEnd.counterpartyChannelID] IN - \* update chain store with packet committment - /\ chainStore' = WritePacketCommitment(chainStore, packet) - \* log sent 
packet - /\ packetLog' = Append(packetLog, - [type |-> "PacketSent", - srcChainID |-> ChainID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] - ) - \* increase application packet sequence - /\ appPacketSeq' = appPacketSeq + 1 - /\ UNCHANGED <> - - - -\* Acknowledge a packet -AcknowledgePacket == - /\ chainStore.packetsToAcknowledge /= <<>> - \* write acknowledgements to chain store - /\ chainStore' = WriteAcknowledgement(chainStore, Head(chainStore.packetsToAcknowledge)) - \* log acknowledgement - /\ packetLog' = LogAcknowledgement(ChainID, chainStore, packetLog, Head(chainStore.packetsToAcknowledge)) - /\ UNCHANGED <> - -(*************************************************************************** - Specification - ***************************************************************************) -\* Initial state predicate -\* Initially -\* - the chain store is initialized to -\* InitChainStore(ChainID, ChannelOrdering, MaxDelay) -\* (defined in IBCPacketDelayDefinitions.tla) -\* - incomingPacketDatagrams is an empty sequence -\* - the appPacketSeq is set to 1 -Init == - /\ chainStore = InitChainStore(ChainID, Heights, ChannelOrdering, MaxDelay) - /\ incomingPacketDatagrams = <<>> - /\ appPacketSeq = 1 - -\* Next state action -\* The chain either -\* - advances its height -\* - receives datagrams and updates its state -\* - sends a packet -\* - acknowledges a packet -Next == - \/ AdvanceChain - \/ HandlePacketDatagrams - \/ SendPacket - \/ AcknowledgePacket - \/ UNCHANGED vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ chainStore \in ChainStores(Heights, ChannelOrdering, MaxPacketSeq) - /\ incomingPacketDatagrams \in Seq(Datagrams(Heights, MaxPacketSeq)) - /\ appPacketSeq \in Int - /\ packetLog \in Seq(PacketLogEntries(Heights, MaxPacketSeq)) - 
-============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:44:24 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 13:52:13 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/IBCPacketDelay.cfg b/docs/spec/tla/packet-delay/IBCPacketDelay.cfg deleted file mode 100644 index 6b6fab9bd3..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelay.cfg +++ /dev/null @@ -1,12 +0,0 @@ -CONSTANTS - MaxHeight = 3 - ChannelOrdering = "UNORDERED" - MaxPacketSeq = 1 - MaxDelay = 1 - -INIT Init -NEXT Next - -INVARIANTS - TypeOK - Inv \ No newline at end of file diff --git a/docs/spec/tla/packet-delay/IBCPacketDelay.tla b/docs/spec/tla/packet-delay/IBCPacketDelay.tla deleted file mode 100644 index c52903bb35..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelay.tla +++ /dev/null @@ -1,332 +0,0 @@ ---------------------------- MODULE IBCPacketDelay --------------------------- - -(*************************************************************************** - A TLA+ specification of the IBC packet transmission with packet delays. - Packet delays ensure that packet-related data should be accepted only - after some delay has passed since the corresponding header is installed. 
-***************************************************************************) - -EXTENDS Integers, FiniteSets, Sequences, IBCPacketDelayDefinitions - -CONSTANTS - \* @type: Int; - MaxHeight, \* maximal height of all the chains in the system - \* @type: Str; - ChannelOrdering, \* indicate whether the channels are ordered or unordered - \* @type: Int; - MaxPacketSeq, \* maximal packet sequence number - \* @type: Int; - MaxDelay \* maximal packet delay - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* packet datagrams created by the relayer but not submitted - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: <> -> Int; - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -chainAvars == <> -chainBvars == <> -vars == <> - -Heights == 1..MaxHeight - -(*************************************************************************** - Instances of Chain - ***************************************************************************) - -\* We suppose there are two chains that communicate, ChainA and ChainB -\* ChainA -- Instance of Chain.tla -ChainA == INSTANCE Chain - WITH ChainID <- "chainA", - chainStore <- chainAstore, - incomingPacketDatagrams <- packetDatagramsChainA, - appPacketSeq <- appPacketSeqChainA - -\* ChainB -- Instance of Chain.tla -ChainB == INSTANCE Chain - WITH ChainID <- "chainB", - chainStore <- chainBstore, - incomingPacketDatagrams <- packetDatagramsChainB, - 
appPacketSeq <- appPacketSeqChainB - - (*************************************************************************** - Environment operators - ***************************************************************************) - -\* get chain store by ID -\* @type: (Str) => CHAINSTORE; -GetChainByID(chainID) == - IF chainID = "chainA" - THEN chainAstore - ELSE chainBstore - -\* update the client height of the client for the counterparty chain of chainID -UpdateClientHeights(chainID) == - /\ \/ /\ chainID = "chainA" - /\ chainAstore.counterpartyClientHeights[chainBstore.height] = 0 - /\ chainAstore' = [chainAstore EXCEPT - !.counterpartyClientHeights = [chainAstore.counterpartyClientHeights EXCEPT - ![chainBstore.height] = chainAstore.timestamp], - !.timestamp = chainAstore.timestamp + 1 - ] - /\ UNCHANGED chainBstore - \/ /\ chainID = "chainB" - /\ chainBstore.counterpartyClientHeights[chainAstore.height] = 0 - /\ chainBstore' = [chainBstore EXCEPT - !.counterpartyClientHeights = [chainBstore.counterpartyClientHeights EXCEPT - ![chainAstore.height] = chainBstore.timestamp], - !.timestamp = chainBstore.timestamp + 1 - ] - /\ UNCHANGED chainAstore - \/ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - - -\* Compute a packet datagram designated for dstChainID, based on the packetLogEntry -\* @type: (Str, Str, LOGENTRY) => DATAGRAM; -PacketDatagram(srcChainID, dstChainID, packetLogEntry) == - - LET srcChannelID == GetChannelID(srcChainID) IN \* "chanAtoB" (if srcChainID = "chainA") - LET dstChannelID == GetChannelID(dstChainID) IN \* "chanBtoA" (if dstChainID = "chainB") - - LET srcPortID == GetPortID(srcChainID) IN \* "portA" (if srcChainID = "chainA") - LET dstPortID == GetPortID(dstChainID) IN \* "portB" (if dstChainID = "chainB") - - LET srcHeight == GetLatestHeight(GetChainByID(srcChainID)) IN - - \* the source chain of the packet that is received by dstChainID is srcChainID - LET recvPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> 
packetLogEntry.timeoutHeight, - srcChannelID |-> srcChannelID, - srcPortID |-> srcPortID, - dstChannelID |-> dstChannelID, - dstPortID |-> dstPortID - ] IN - - \* the source chain of the packet that is acknowledged by srcChainID is dstChainID - LET ackPacket == [ - sequence |-> packetLogEntry.sequence, - timeoutHeight |-> packetLogEntry.timeoutHeight, - srcChannelID |-> dstChannelID, - srcPortID |-> dstPortID, - dstChannelID |-> srcChannelID, - dstPortID |-> srcPortID - ] IN - - IF packetLogEntry.type = "PacketSent" - THEN [ - type |-> "PacketRecv", - packet |-> recvPacket, - proofHeight |-> srcHeight - ] - ELSE IF packetLogEntry.type = "WriteAck" - THEN [ - type |-> "PacketAck", - packet |-> ackPacket, - acknowledgement |-> packetLogEntry.acknowledgement, - proofHeight |-> srcHeight - ] - ELSE NullDatagram - -\* submit a packet datagram if a delay has passed -\* or install the appropriate height if it is missing -(* @type: (Str) => -[ - datagramsChainA: Seq(DATAGRAM), datagramsChainB: Seq(DATAGRAM), - outgoingDatagrams: Str -> Seq(DATAGRAM), - chainA: CHAINSTORE, chainB: CHAINSTORE -]; -*) -SubmitDatagramOrInstallClientHeight(chainID) == - LET packetDatagram == Head(outgoingPacketDatagrams[chainID]) IN - LET chain == GetChainByID(chainID) IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - \* packetDatagram.proof height is installed on chain - IF clientHeightTimestamp /= 0 - \* the delay has passed - THEN IF clientHeightTimestamp + MaxDelay < chain.timestamp - \* submit the datagram to the corresponding chain - THEN LET datagramsChainA == IF chainID = "chainA" - THEN Append(packetDatagramsChainA, packetDatagram) - ELSE packetDatagramsChainA IN - 
LET datagramsChainB == IF chainID = "chainB" - THEN Append(packetDatagramsChainB, packetDatagram) - ELSE packetDatagramsChainB IN - LET outgoingDatagrams == [outgoingPacketDatagrams EXCEPT - ![chainID] = Tail(outgoingPacketDatagrams[chainID])] IN - - [datagramsChainA |-> datagramsChainA, - datagramsChainB |-> datagramsChainB, - outgoingDatagrams |-> outgoingDatagrams, - chainA |-> chainAstore, - chainB |-> chainBstore] - \* the client height is installed, but the delay has not passed - \* do not submit and do not install any new heights - ELSE [datagramsChainA |-> packetDatagramsChainA, - datagramsChainB |-> packetDatagramsChainB, - outgoingDatagrams |-> outgoingPacketDatagrams, - chainA |-> chainAstore, - chainB |-> chainBstore] - \* packetDatagram.proof height is not installed on chain, install it - ELSE LET chainA == IF chainID = "chainA" - THEN [chainAstore EXCEPT - !.counterpartyClientHeights = - [chainAstore.counterpartyClientHeights EXCEPT - ![packetDatagram.proofHeight] = chainAstore.timestamp], - !.timestamp = chainAstore.timestamp + 1 - ] - ELSE chainAstore IN - LET chainB == IF chainID = "chainB" - THEN [chainBstore EXCEPT - !.counterpartyClientHeights = - [chainBstore.counterpartyClientHeights EXCEPT - ![packetDatagram.proofHeight] = chainBstore.timestamp], - !.timestamp = chainBstore.timestamp + 1 - ] - ELSE chainBstore IN - - [datagramsChainA |-> packetDatagramsChainA, - datagramsChainB |-> packetDatagramsChainB, - outgoingDatagrams |-> outgoingPacketDatagrams, - chainA |-> chainA, - chainB |-> chainB] - -(*************************************************************************** - Environment actions - ***************************************************************************) - \* update the client height of some chain - UpdateClients == - \E chainID \in ChainIDs : UpdateClientHeights(chainID) - -\* create datagrams depending on packet log -CreateDatagrams == - /\ packetLog /= <<>> - /\ LET packetLogEntry == Head(packetLog) IN - LET srcChainID == 
packetLogEntry.srcChainID IN - LET dstChainID == GetCounterpartyChainID(srcChainID) IN - LET packetDatagram == PacketDatagram(srcChainID, dstChainID, packetLogEntry) IN - /\ \/ /\ packetDatagram = NullDatagram - /\ UNCHANGED outgoingPacketDatagrams - \/ /\ packetDatagram /= NullDatagram - /\ outgoingPacketDatagrams' = - [chainID \in ChainIDs |-> - IF chainID = dstChainID - THEN Append(outgoingPacketDatagrams[chainID], packetDatagram) - ELSE outgoingPacketDatagrams[chainID] - ] - /\ packetLog' = Tail(packetLog) - /\ UNCHANGED <> - /\ UNCHANGED <> - /\ UNCHANGED <> - -\* submit datagrams if delay has passed -SubmitDatagramsWithDelay == - \E chainID \in ChainIDs : - /\ outgoingPacketDatagrams[chainID] /= <<>> - /\ LET submitted == SubmitDatagramOrInstallClientHeight(chainID) IN - /\ packetDatagramsChainA' = submitted.datagramsChainA - /\ packetDatagramsChainB' = submitted.datagramsChainB - /\ outgoingPacketDatagrams' = submitted.outgoingDatagrams - /\ chainAstore' = submitted.chainA - /\ chainBstore' = submitted.chainB - /\ UNCHANGED <> - -(*************************************************************************** - Component actions - ***************************************************************************) - -\* ChainAction: either chain takes a step, leaving the other -\* variables unchange -ChainAction == - \/ /\ ChainA!Next - /\ UNCHANGED chainBvars - /\ UNCHANGED outgoingPacketDatagrams - \/ /\ ChainB!Next - /\ UNCHANGED chainAvars - /\ UNCHANGED outgoingPacketDatagrams - -\* EnvironmentAction: either -\* - create packet datagrams if packet log is not empty, or -\* - update counterparty clients, or -\* - submit datagrams after their delay has passed -EnvironmentAction == - \/ CreateDatagrams - \/ UpdateClients - \/ SubmitDatagramsWithDelay - -(*************************************************************************** - Specification - ***************************************************************************) - -\* Initial state predicate -Init == - /\ 
ChainA!Init - /\ ChainB!Init - /\ outgoingPacketDatagrams = [chainID \in ChainIDs |-> <<>>] - /\ packetLog = <<>> - /\ packetDatagramTimestamp = [<> \in ChainIDs \X Heights |-> 0] - -\* Next state action -Next == - \/ ChainAction - \/ EnvironmentAction - \/ UNCHANGED vars - -Spec == Init /\ [][Next]_vars - -(*************************************************************************** - Invariants - ***************************************************************************) - -\* type invariant -TypeOK == - /\ ChainA!TypeOK - /\ ChainB!TypeOK - /\ outgoingPacketDatagrams \in [ChainIDs -> Seq(Datagrams(Heights, MaxPacketSeq))] - /\ packetDatagramTimestamp \in [ChainIDs \X Heights -> Int] - -\* each packet datagam is processed at time t (stored in packetDatagramTimestamp), -\* such that t >= ht + delay, where -\* ht is the time when the client height is installed -PacketDatagramsDelay == - \A chainID \in ChainIDs : - \A h \in Heights : - /\ GetChainByID(chainID).counterpartyClientHeights[h] /= 0 - /\ packetDatagramTimestamp[<>] /= 0 - => - packetDatagramTimestamp[<>] >= GetChainByID(chainID).counterpartyClientHeights[h] + MaxDelay - -\* a conjunction of all invariants -Inv == - /\ PacketDatagramsDelay - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:43:40 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 13:44:21 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla b/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla deleted file mode 100644 index cad0aee7ea..0000000000 --- a/docs/spec/tla/packet-delay/IBCPacketDelayDefinitions.tla +++ /dev/null @@ -1,419 +0,0 @@ ---------------------- MODULE IBCPacketDelayDefinitions --------------------- - -EXTENDS Integers, FiniteSets, Sequences - -(************************ TYPE ALIASES FOR SNOWCAT *************************) -(* @typeAlias: CHAN = - [ - state: Str, - order: Str, - portID: 
Str, - channelID: Str, - counterpartyPortID: Str, - counterpartyChannelID: Str, - nextSendSeq: Int, - nextRcvSeq: Int, - nextAckSeq: Int - ]; -*) -(* @typeAlias: PACKET = - [ - sequence: Int, - timeoutHeight: Int, - srcPortID: Str, - srcChannelID: Str, - dstPortID: Str, - dstChannelID: Str - ]; -*) -(* @typeAlias: PACKETCOMM = - [ - portID: Str, - channelID: Str, - sequence: Int, - timeoutHeight: Int - ]; -*) -(* @typeAlias: PACKETREC = - [ - portID: Str, - channelID: Str, - sequence: Int - ]; -*) -(* @typeAlias: PACKETACK = - [ - portID: Str, - channelID: Str, - sequence: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: CHAINSTORE = - [ - height: Int, - timestamp: Int, - counterpartyClientHeights: Int -> Int, - channelEnd: CHAN, - packetCommitments: Set(PACKETCOMM), - packetsToAcknowledge: Seq(PACKET), - packetReceipts: Set(PACKETREC), - packetAcknowledgements: Set(PACKETACK) - ]; -*) -(* @typeAlias: DATAGRAM = - [ - type: Str, - packet: PACKET, - proofHeight: Int, - acknowledgement: Bool - ]; -*) -(* @typeAlias: LOGENTRY = - [ - type: Str, - srcChainID: Str, - sequence: Int, - timeoutHeight: Int, - acknowledgement: Bool - ]; -*) - -(********************** Common operator definitions ***********************) -ChainIDs == {"chainA", "chainB"} -ChannelIDs == {"chanAtoB", "chanBtoA"} -PortIDs == {"portA", "portB"} -ChannelStates == {"OPEN", "CLOSED"} - -nullHeight == 0 -nullChannelID == "none" -nullPortID == "none" -nullEscrowAddress == "none" - -Max(S) == CHOOSE x \in S: \A y \in S: y <= x - -(******************************* ChannelEnds ******************************* - A set of channel end records. - A channel end record contains the following fields: - - - state -- a string - Stores the current state of this channel end. We assume that channel - handshake has successfully finished, that is, the state is either - "OPEN" or "CLOSED" - - - order -- a string - Stores whether the channel end is ordered or unordered. 
It has one - of the following values: "UNORDERED", "ORDERED". - - * ordered channels have three additional packet sequence fields: - nextSendSeq -- stores the sequence number of the next packet that - is going to be sent, - nextRcvSeq -- stores the sequence number of the next packet that - is going to be received, - nextAckSeq -- stores the sequence number of the next packet that - is going to be acknowledged. - - - portID -- a port identifier - Stores the port identifier of this channel end. - - - channelID -- a channel identifier - Stores the channel identifier of this channel end. - - - counterpartyPortID -- a port identifier - Stores the port identifier of the counterparty channel end. - - - counterpartyChannelID -- a channel identifier - Stores the channel identifier of the counterparty channel end. - - Note: we omit channel versions and connection hops. - ***************************************************************************) -\* @type: (Str, Int) => Set(CHAN); -ChannelEnds(channelOrdering, maxPacketSeq) == - IF channelOrdering = "UNORDERED" - THEN \* set of unordered channels - [ - state : ChannelStates, - order : {"UNORDERED"}, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - ELSE \* set of ordered channels - [ - state : ChannelStates, - order : {"ORDERED"}, - nextSendSeq : 0..maxPacketSeq, - nextRcvSeq : 0..maxPacketSeq, - nextAckSeq : 0..maxPacketSeq, - portID : PortIDs \union {nullPortID}, - channelID : ChannelIDs \union {nullChannelID}, - counterpartyPortID : PortIDs \union {nullPortID}, - counterpartyChannelID : ChannelIDs \union {nullChannelID} - ] - - -(******* PacketCommitments, PacketReceipts, PacketAcknowledgements *********) -\* Set of packet commitments -\* @type: (Set(Int), Int) => Set(PACKETCOMM); -PacketCommitments(Heights, maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : 
PortIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] - -\* Set of packet receipts -\* @type: (Int) => Set(PACKETREC); -PacketReceipts(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq - ] - -\* Set of packet acknowledgements -\* @type: (Int) => Set(PACKETACK); -PacketAcknowledgements(maxPacketSeq) == - [ - channelID : ChannelIDs, - portID : PortIDs, - sequence : 1..maxPacketSeq, - acknowledgement : BOOLEAN - ] - -(********************************* Packets *********************************) -\* Set of packets -\* @type: (Set(Int), Int) => Set(PACKET); -Packets(Heights, maxPacketSeq) == - [ - sequence : 1..maxPacketSeq, - timeoutHeight : Heights, - srcPortID : PortIDs, - srcChannelID : ChannelIDs, - dstPortID : PortIDs, - dstChannelID : ChannelIDs - ] - -(******************************** ChainStores ****************************** - A set of chain store records. - A chain store record contains the following fields: - - - height : an integer between nullHeight and MaxHeight. - Stores the current height of the chain. - - - counterpartyClientHeights : a set of integers between 1 and MaxHeight - Stores the heights of the client for the counterparty chain. - - - connectionEnd : a connection end record - Stores data about the connection with the counterparty chain. - - - packetCommitments : a set of packet commitments - A packet commitment is added to this set when a chain sends a packet - to the counterparty. - - - packetReceipts : a set of packet receipts - A packet receipt is added to this set when a chain received a packet - from the counterparty chain. - - - packetsToAcknowledge : a sequence of packets - A packet is added to this sequence when a chain receives it and is used - later for the receiver chain to write an acknowledgement for the packet. 
- - - packetAcknowledgements : a set of packet acknowledgements - A packet acknowledgement is added to this set when a chain writes an - acknowledgement for a packet it received from the counterparty. - - A chain store is the combination of the provable and private stores. - ***************************************************************************) -\* @type: (Set(Int), Str, Int) => Set(CHAINSTORE); -ChainStores(Heights, channelOrdering, maxPacketSeq) == - [ - height : Heights, - timestamp : Int, - counterpartyClientHeights : [Heights -> Int], - channelEnd : ChannelEnds(channelOrdering, maxPacketSeq), - packetCommitments : SUBSET(PacketCommitments(Heights, maxPacketSeq)), - packetReceipts : SUBSET(PacketReceipts(maxPacketSeq)), - packetsToAcknowledge : Seq(Packets(Heights, maxPacketSeq)), - packetAcknowledgements : SUBSET(PacketAcknowledgements(maxPacketSeq)) - ] - -(******************************** Datagrams ********************************) -\* Set of datagrams (we consider only packet datagrams) -\* @type: (Set(Int), Int) => Set(DATAGRAM); -Datagrams(Heights, maxPacketSeq) == - [ - type : {"PacketRecv"}, - packet : Packets(Heights, maxPacketSeq), - proofHeight : Heights - ] \union [ - type : {"PacketAck"}, - packet : Packets(Heights, maxPacketSeq), - acknowledgement : BOOLEAN, - proofHeight : Heights - ] - -\* Null datagram -NullDatagram == - [type |-> "null"] - -(**************************** PacketLogEntries *****************************) -\* Set of packet log entries -\* @type: (Set(Int), Int) => Set(LOGENTRY); -PacketLogEntries(Heights, maxPacketSeq) == - [ - type : {"PacketSent"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - timeoutHeight : Heights - ] \union [ - type : {"PacketRecv"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, - timeoutHeight : Heights - ] \union [ - type : {"WriteAck"}, - srcChainID : ChainIDs, - sequence : 1..maxPacketSeq, - portID : PortIDs, - channelID : ChannelIDs, 
- timeoutHeight : Heights, - acknowledgement : BOOLEAN - ] - -\* Null packet log entry -NullPacketLogEntry == - [type |-> "null"] - - -(*************************************************************************** - Chain helper operators - ***************************************************************************) - -\* get the ID of chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyChainID(chainID) == - IF chainID = "chainA" THEN "chainB" ELSE "chainA" - -\* get the maximal height of the client for chainID's counterparty chain -\* @type: (CHAINSTORE) => Int; -GetMaxCounterpartyClientHeight(chain) == - IF DOMAIN chain.counterpartyClientHeights /= {} - THEN Max(DOMAIN chain.counterpartyClientHeights) - ELSE nullHeight - -\* get the channel ID of the channel end at chainID -\* @type: (Str) => Str; -GetChannelID(chainID) == - IF chainID = "chainA" - THEN "chanAtoB" - ELSE IF chainID = "chainB" - THEN "chanBtoA" - ELSE nullChannelID - -\* get the channel ID of the channel end at chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyChannelID(chainID) == - IF chainID = "chainA" - THEN "chanBtoA" - ELSE IF chainID = "chainB" - THEN "chanAtoB" - ELSE nullChannelID - -\* get the port ID at chainID -\* @type: (Str) => Str; -GetPortID(chainID) == - IF chainID = "chainA" - THEN "portA" - ELSE IF chainID = "chainB" - THEN "portB" - ELSE nullPortID - -\* get the port ID at chainID's counterparty chain -\* @type: (Str) => Str; -GetCounterpartyPortID(chainID) == - IF chainID = "chainA" - THEN "portB" - ELSE IF chainID = "chainB" - THEN "portA" - ELSE nullPortID - -\* get the latest height of chain -\* @type: (CHAINSTORE) => Int; -GetLatestHeight(chain) == - chain.height - -(*************************************************************************** - Initial values of a channel end, connection end, chain store - ***************************************************************************) -\* Initial value of an unordered channel end: -\* - 
state is "OPEN" (we assume channel handshake has successfully finished) -\* - order is "UNORDERED" -\* - portID, channelID, counterpartyPortID, counterpartyChannelID depend on ChainID -\* @type: (Str) => CHAN; -InitUnorderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "UNORDERED", - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID) - ] - -\* Initial value of an ordered channel end: -\* - state is "OPEN" (we assume channel handshake has successfully finished) -\* - order is "ORDERED" -\* - nextSendSeq, nextRcvSeq, nextAckSeq are set to 0 -\* - portID, channelID, counterpartyPortID, counterpartyChannelID depend on ChainID -\* @type: (Str) => CHAN; -InitOrderedChannelEnd(ChainID) == - [ - state |-> "OPEN", - order |-> "ORDERED", - nextSendSeq |-> 0, - nextRcvSeq |-> 0, - nextAckSeq |-> 0, - portID |-> GetPortID(ChainID), - channelID |-> GetChannelID(ChainID), - counterpartyPortID |-> GetCounterpartyPortID(ChainID), - counterpartyChannelID |-> GetCounterpartyChannelID(ChainID) - ] - -\* Initial value of a channel end, based on the channel ordering -\* @type: (Str, Str) => CHAN; -InitChannelEnd(ChainID, ChannelOrdering) == - IF ChannelOrdering = "ORDERED" - THEN InitOrderedChannelEnd(ChainID) - ELSE InitUnorderedChannelEnd(ChainID) - -\* Initial value of the chain store: -\* - height is initialized to 1 -\* - timestamp is initialized to 1 -\* - there are no installed client heights -\* - the channel end is initialized to InitChannelEnd -\* - the packet committments, receipts, acknowledgements, and packets -\* to acknowledge are empty -\* @type: (Str, Set(Int), Str, Int) => CHAINSTORE; -InitChainStore(ChainID, Heights, ChannelOrdering, MaxDelay) == - [ - height |-> 1, - timestamp |-> 1, - counterpartyClientHeights |-> [h \in Heights |-> 0], - channelEnd |-> InitChannelEnd(ChainID, ChannelOrdering), - - packetCommitments 
|-> {}, - packetReceipts |-> {}, - packetAcknowledgements |-> {}, - packetsToAcknowledge |-> <<>> - ] - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:46:15 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 14:06:33 CET 2020 by ilinastoilkovska - \ No newline at end of file diff --git a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla b/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla deleted file mode 100644 index 22a3b359be..0000000000 --- a/docs/spec/tla/packet-delay/ICS04PacketHandlers.tla +++ /dev/null @@ -1,387 +0,0 @@ ------------------------- MODULE ICS04PacketHandlers ------------------------ - -EXTENDS Integers, FiniteSets, Sequences, IBCPacketDelayDefinitions - -(*************************************************************************** - Packet datagram handlers - ***************************************************************************) - -\* Handle "PacketRecv" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Int, Seq(LOGENTRY), <> -> Int) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -HandlePacketRecv(chainID, chain, packetDatagram, delay, log, datagramTimestamp) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet has not passed the timeout height - /\ \/ packet.timeoutHeight = 0 - \/ chain.height < packet.timeoutHeight - \* if the "PacketRecv" datagram has 
valid port and channel IDs - /\ packet.srcPortID = channelEnd.counterpartyPortID - /\ packet.srcChannelID = channelEnd.counterpartyChannelID - /\ packet.dstPortID = channelEnd.portID - /\ packet.dstChannelID = channelEnd.channelID - \* if "PacketRecv" datagram can be verified (i.e., proofHeight is installed) - /\ clientHeightTimestamp /= 0 - \* the "PacketRecv" datagram was received after packet delay - /\ clientHeightTimestamp + delay < chain.timestamp - THEN \* construct log entry for packet log - LET logEntry == [ - type |-> "PacketRecv", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight - ] IN - - \* if the channel is unordered and the packet has not been received - IF /\ channelEnd.order = "UNORDERED" - /\ [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence - ] \notin chain.packetReceipts - THEN LET newChainStore == [chain EXCEPT - \* record that the packet has been received - !.packetReceipts = - chain.packetReceipts - \union - {[ - channelID |-> packet.dstChannelID, - portID |-> packet.dstPortID, - sequence |-> packet.sequence - ]}, - \* add packet to the set of packets for which an acknowledgement should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> Append(log, logEntry), - datagramTimestamp |-> newDatagramTimestamp - ] - - ELSE \* if the channel is ordered and the packet sequence is nextRcvSeq - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextRcvSeq - THEN LET newChainStore == [chain EXCEPT - \* increase the nextRcvSeq - !.channelEnd.nextRcvSeq = - channelEnd.nextRcvSeq + 1, - \* add packet to the set of packets for which an acknowledgement 
should be written - !.packetsToAcknowledge = Append(chain.packetsToAcknowledge, packet)] IN - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> Append(log, logEntry), - datagramTimestamp |-> newDatagramTimestamp - ] - - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - - -\* Handle "PacketAck" datagrams -(* @type: (Str, CHAINSTORE, DATAGRAM, Int, Seq(LOGENTRY), <> -> Int) => - [chainStore: CHAINSTORE, packetLog: Seq(LOGENTRY), datagramTimestamp: <> -> Int]; -*) -HandlePacketAck(chainID, chain, packetDatagram, delay, log, datagramTimestamp) == - \* get chainID's channel end - LET channelEnd == chain.channelEnd IN - \* get packet - LET packet == packetDatagram.packet IN - \* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - - \* if the proof height of the packet datagram is installed on the chain, - \* then clientHeightTimestamp is the timestamp, denoting the time when this - \* height was installed on the chain; - \* otherwise it is 0, denoting that this height is not installed on the chain - LET clientHeightTimestamp == chain.counterpartyClientHeights[packetDatagram.proofHeight] IN - - IF \* if the channel end is open for packet transmission - /\ channelEnd.state = "OPEN" - \* if the packet committment exists in the chain store - /\ packetCommitment \in chain.packetCommitments - \* if the "PacketRecv" datagram has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = 
channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* if "PacketAck" datagram can be verified (i.e., proofHeight is installed) - /\ clientHeightTimestamp /= 0 - \* the "PacketAck" datagram was received after packet delay - /\ clientHeightTimestamp + delay < chain.timestamp - THEN \* if the channel is ordered and the packet sequence is nextAckSeq - LET newChainStore == - IF /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextAckSeq - THEN \* increase the nextAckSeq and remove packet commitment - [chain EXCEPT - !.channelEnd.nextAckSeq = - channelEnd.nextAckSeq + 1, - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* if the channel is unordered, remove packet commitment - ELSE IF channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = chain.packetCommitments \ {packetCommitment}] - \* otherwise, do not update the chain store - ELSE chain IN - - \* record the timestamp in the history variable - LET newDatagramTimestamp == [datagramTimestamp EXCEPT - ![<>] = chain.timestamp - ] IN - - [ - chainStore |-> newChainStore, - packetLog |-> log, - datagramTimestamp |-> newDatagramTimestamp - ] - - \* otherwise, do not update the chain store and the log - ELSE [chainStore |-> chain, packetLog |-> log, datagramTimestamp |-> datagramTimestamp] - - -\* write packet committments to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WritePacketCommitment(chain, packet) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get latest counterparty client height - LET latestClientHeight == GetMaxCounterpartyClientHeight(chain) IN - - IF \* channel end is neither null nor closed - /\ channelEnd.state \notin {"UNINIT", "CLOSED"} - \* if the packet has valid port and channel IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = 
channelEnd.counterpartyChannelID - \* timeout height has not passed - /\ \/ packet.timeoutHeight = 0 - \/ latestClientHeight < packet.timeoutHeight - THEN IF \* if the channel is ordered, check if packetSeq is nextSendSeq, - \* add a packet committment in the chain store, and increase nextSendSeq - /\ channelEnd.order = "ORDERED" - /\ packet.sequence = channelEnd.nextSendSeq - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.channelEnd = - [channelEnd EXCEPT !.nextSendSeq = channelEnd.nextSendSeq + 1], - !.timestamp = - chain.timestamp + 1 - ] - \* otherwise, do not update the chain store - ELSE chain - ELSE IF \* if the channel is unordered, - \* add a packet committment in the chain store - /\ channelEnd.order = "UNORDERED" - THEN [chain EXCEPT - !.packetCommitments = - chain.packetCommitments \union {[portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight]}, - !.timestamp = - chain.timestamp + 1 - ] - \* otherwise, do not update the chain store - ELSE chain - -\* write acknowledgements to chain store -\* @type: (CHAINSTORE, PACKET) => CHAINSTORE; -WriteAcknowledgement(chain, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* write the acknowledgement to the chain store and remove - \* the packet from the set of packets to acknowledge - [chain EXCEPT !.packetAcknowledgements = - chain.packetAcknowledgements - \union - {packetAcknowledgement}, - !.packetsToAcknowledge = - 
Tail(chain.packetsToAcknowledge), - !.timestamp = - chain.timestamp + 1] - - \* remove the packet from the sequence of packets to acknowledge - ELSE [chain EXCEPT !.packetsToAcknowledge = - Tail(chain.packetsToAcknowledge), - !.timestamp = - chain.timestamp + 1] - -\* log acknowledgements to packet Log -\* @type: (Str, CHAINSTORE, Seq(LOGENTRY), PACKET) => Seq(LOGENTRY); -LogAcknowledgement(chainID, chain, log, packet) == - \* create a packet acknowledgement for this packet - LET packetAcknowledgement == [ - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence, - acknowledgement |-> TRUE - ] IN - - \* if the acknowledgement for the packet has not been written - IF packetAcknowledgement \notin chain.packetAcknowledgements - THEN \* append a "WriteAck" log entry to the log - LET packetLogEntry == - [type |-> "WriteAck", - srcChainID |-> chainID, - sequence |-> packet.sequence, - portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - timeoutHeight |-> packet.timeoutHeight, - acknowledgement |-> TRUE] IN - Append(log, packetLogEntry) - \* do not add anything to the log - ELSE log - - -\* check if a packet timed out -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutPacket(chain, counterpartyChain, packet, proofHeight) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - \* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == [portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence] IN - - \* if channel end is open - IF /\ channelEnd.state = "OPEN" - \* srcChannelID and srcPortID 
match channel and port IDs - /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPortID = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* packet has timed out - /\ packet.timeoutHeight > 0 - /\ proofHeight >= packet.timeoutHeight - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* counterparty channel end has dstPortID and dstChannelID - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedChainStore == [chain EXCEPT - !.channelEnd = updatedChannelEnd, - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -\* check if a packet timed out on close -\* @type: (CHAINSTORE, CHAINSTORE, PACKET, Int) => CHAINSTORE; -TimeoutOnClose(chain, counterpartyChain, packet, proofHeight) == - \* get channel end - LET channelEnd == chain.channelEnd IN - \* get counterparty channel end - LET counterpartyChannelEnd == counterpartyChain.channelEnd IN - - \* get packet committment that should be in chain store - LET packetCommitment == [portID |-> packet.srcPortID, - channelID |-> packet.srcChannelID, - sequence |-> packet.sequence, - timeoutHeight |-> packet.timeoutHeight] IN - \* get packet receipt that should be absent in counterparty chain store - LET packetReceipt == 
[portID |-> packet.dstPortID, - channelID |-> packet.dstChannelID, - sequence |-> packet.sequence] IN - - - \* if srcChannelID and srcPortID match channel and port IDs - IF /\ packet.srcPortID = channelEnd.portID - /\ packet.srcChannelID = channelEnd.channelID - \* if dstChannelID and dstPortID match counterparty channel and port IDs - /\ packet.dstPort = channelEnd.counterpartyPortID - /\ packet.dstChannelID = channelEnd.counterpartyChannelID - \* chain has sent the packet - /\ packetCommitment \in chain.packetCommitments - \* counterparty channel end is closed and its fields are as expected - /\ counterpartyChannelEnd.state = "CLOSED" - /\ counterpartyChannelEnd.portID = packet.dstPortID - /\ counterpartyChannelEnd.channelID = packet.dstChannelID - /\ counterpartyChannelEnd.counterpartyPortID = packet.srcPortID - /\ counterpartyChannelEnd.counterpartyChannelID = packet.srcChannelID - \* counterparty chain has not received the packet - /\ \/ /\ channelEnd.order = "ORDERED" - /\ counterpartyChannelEnd.nextRcvSeq <= packet.sequence - \/ /\ channelEnd.order = "UNORDERED" - /\ packetReceipt \notin counterpartyChain.packetReceipts - \* close ordered channel and remove packet commitment - THEN LET updatedChannelEnd == [channelEnd EXCEPT - !.state = IF channelEnd.order = "ORDERED" - THEN "CLOSED" - ELSE channelEnd.state] IN - LET updatedChainStore == [chain EXCEPT - !.channelEnd = updatedChannelEnd, - !.packetCommitments = - chain.packetCommitments \ {packetCommitment}] IN - - updatedChainStore - - \* otherwise, do not update the chain store - ELSE chain - -============================================================================= -\* Modification History -\* Last modified Mon Apr 19 15:46:42 CEST 2021 by ilinastoilkovska -\* Created Thu Dec 10 15:12:41 CET 2020 by ilinastoilkovska diff --git a/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla b/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla deleted file mode 100644 index 1411f647c7..0000000000 --- 
a/docs/spec/tla/packet-delay/MC_IBCPacketDelay.tla +++ /dev/null @@ -1,30 +0,0 @@ --------------------------- MODULE MC_IBCPacketDelay ------------------------- - -MaxHeight == 3 -ChannelOrdering == "UNORDERED" -MaxPacketSeq == 1 -MaxDelay == 1 - -VARIABLES - \* @type: CHAINSTORE; - chainAstore, \* store of ChainA - \* @type: CHAINSTORE; - chainBstore, \* store of ChainB - \* @type: Seq(DATAGRAM); - packetDatagramsChainA, \* sequence of packet datagrams incoming to ChainA - \* @type: Seq(DATAGRAM); - packetDatagramsChainB, \* sequence of packet datagrams incoming to ChainB - \* @type: Str -> Seq(DATAGRAM); - outgoingPacketDatagrams, \* packet datagrams created by the relayer but not submitted - \* @type: Seq(LOGENTRY); - packetLog, \* packet log - \* @type: Int; - appPacketSeqChainA, \* packet sequence number from the application on ChainA - \* @type: Int; - appPacketSeqChainB, \* packet sequence number from the application on ChainB - \* @type: <> -> Int; - packetDatagramTimestamp \* history variable that tracks when packet datagrams were processed - -INSTANCE IBCPacketDelay - -============================================================================= diff --git a/docs/spec/tla/packet-delay/README.md b/docs/spec/tla/packet-delay/README.md deleted file mode 100644 index b01cdc44c0..0000000000 --- a/docs/spec/tla/packet-delay/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# TLA+ Specification of IBC Packet Transmission with Packet Delay (deprecated) - -This document describes the TLA+ specification of an IBC packet transmission with -packet delays. -IBC packet transmission with packet delays ensures that -packet-related data should be accepted only after some delay has passed since the corresponding header is installed. -This allows a correct relayer to intervene if the header is from a fork and shutdown the IBC handler, preventing damage at the application level. 
- -This TLA+ specification was used during the [design process](https://github.com/cosmos/cosmos-sdk/pull/7884) of the IBC connection-specified delay, where packet delay was a time duration. -Later, this design was augmented by adding a second delay parameter, in -terms of number of blocks; called [hybrid packet delay](https://github.com/cosmos/ibc/issues/539). - -## The Model of the Protocol - -We model a system where packet datagrams are both **submitted** by a -relayer and **handled** by a chain after a delay period has passed. -The system contains the following modules: -- [IBCPacketDelay.tla](IBCPacketDelay.tla), the main module, which -instantiates two chains and models the behavior of a correct relayer -as the environment where the two chains operate; -- [Chain.tla](Chain.tla), which models the behavior of a chain; -- [IBCPacketDelayDefinitions.tla](IBCPacketDelayDefinitions.tla), which contains definitions of operators that are shared between the - different modules; -- [ICS04PacketHandlers.tla](ICS04PacketHandlers.tla), which contains definitions of operators that specify packet transmission and packet datagram handling. - -### Timestamps - -To be able to enforce packet datagram submission and handling after a given delay, -we introduce a `timestamp` field in the chain store. -This `timestamp` is initially 1, and is incremented when a chain takes a step, that is, when it advances its height, or when it processes datagrams. - -Further, we need to keep track of the time when a counterparty client height -is installed on a chain. -That is, instead of keeping track of a set of counterparty client heights, in the -chain store, we store for each client height -the timestamp at which it was installed. -A counterparty client height whose timestamp is 0 has -not yet been installed on the chain. - - -### Relayer - -In this specification, the relayer is a part of the environment in which the two chains operate. 
-We define three actions that the environment (i.e., the relayer) can take: -- `UpdateClients`, which updates the counterparty client -heights of some chain. This action abstracts the -transmission of client datagrams. -- `CreateDatagrams`, which creates datagrams depending -on the packet log. This action scans the packet log and -adds the created packet datagram to the outgoing packet -datagram queue of the appropriate chain. -- `SubmitDatagramsWithDelay`, which submits datagrams if -delay has passed. This action scans the outgoing packet datagram queue -of a given chain, and -checks if the `proofHeight` of the datagram is a -client height that is installed on the chain. -The following cases are possible: - - if `proofHeight` is installed, then check if a `MaxDelay` period - has passed between the timestamp when the client height was - installed and the current `timestamp`, stored in the chain store. If - this is the case -- submit the datagram to the incoming packet - datagram queue of the chain; otherwise -- do nothing. - - if `proofHeight` is not installed, then install the it. - -### Packet handlers - -On the packet handling side, the chain also checks if the incoming -`PacketRecv` or `PacketAck` datagram has a valid `proofHeight` field. -This means that the `proofHeight` of the datagram should be installed on the -chain, and there should be `MaxDelay` period between the timestamp when the `proofHeight` was -installed and the current `timestamp` of the chain. - -### History variable - -We define a history variable, called `packetDatagramTimestamp`, where we store -for each `chainID` and each `proofHeight`, the timestamp of the chain `chainID` when a datagram with this `proofHeight` was processed. -We use this history variable in the invariant `PacketDatagramsDelay`, -described below. 
- - -## Invariants - -The module [IBCPacketDelay.tla](IBCPacketDelay.tla) defines the following invariants: -- `TypeOK`, the type invariant, -- `PacketDatagramsDelay`, which ensures that each packet -datagram is processed after a delay period. - -## Using the Model - -### Constants - -The module `IBCPacketDelay.tla` is parameterized by the constants: - - `MaxHeight`, a natural number denoting the maximal height of the chains, - - `ChannelOrdering`, a string denoting whether the channels are ordered or unordered, - - `MaxPacketSeq`, a natural number denoting the maximal packet sequence number - - `MaxDelay`, a natural number denoting the maximal packet delay - -### Importing the specification into TLA+ toolbox - -To import the specification in the TLA+ toolbox and run TLC: - - add a new spec in TLA+ toolbox with the root-module file `IBCPacketDelay.tla` - - create a model - - assign a value to the constants (example values can be found in `IBCPacketDelay.cfg`) - - choose "Temporal formula" as the behavior spec, and use the formula `Spec` - - choose invariants/properties that should be checked - - run TLC on the model - -#### Basic checks with TLC - -We ran TLC on `IBCPacketDelay.tla` using the constants defined -in `IBCPacketDelay.cfg`. -We were able to check the invariants described above within seconds. - -#### Apalache - -The specification contains type annotations for the -model checker [Apalache](https://github.com/informalsystems/apalache). -The specification passes the type check using the type checker [Snowcat](https://apalache.informal.systems/docs/apalache/typechecker-snowcat.html) -integrated in Apalache. 
- - diff --git a/e2e/e2e/__init__.py b/e2e/e2e/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/e2e/e2e/channel.py b/e2e/e2e/channel.py deleted file mode 100644 index 90921aff7b..0000000000 --- a/e2e/e2e/channel.py +++ /dev/null @@ -1,638 +0,0 @@ -from typing import Optional, Tuple -import toml - -from .cmd import * -from .common import * - -import e2e.relayer as relayer - -@dataclass -class TxChanOpenInitRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: Optional[ChannelId] - counterparty_port_id: PortId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-init") -@dataclass -class TxChanOpenInit(Cmd[TxChanOpenInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - ordering: Optional[Ordering] = None - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id] - - if self.ordering is not None: - args.extend(['--ordering', str(self.ordering)]) - - return args - - def process(self, result: Any) -> TxChanOpenInitRes: - return from_dict(TxChanOpenInitRes, result['OpenInitChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenTryRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-try") -@dataclass -class TxChanOpenTry(Cmd[TxChanOpenTryRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - src_channel_id: ChannelId - ordering: Optional[Ordering] = None - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-s", self.src_channel_id] - - if self.ordering is 
not None: - args.extend(['--ordering', str(self.ordering)]) - - return args - - def process(self, result: Any) -> TxChanOpenTryRes: - return from_dict(TxChanOpenTryRes, result['OpenTryChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenAckRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-ack") -@dataclass -class TxChanOpenAck(Cmd[TxChanOpenAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_channel_id: ChannelId - src_channel_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_channel_id, - "-s", self.src_channel_id] - - return args - - def process(self, result: Any) -> TxChanOpenAckRes: - return from_dict(TxChanOpenAckRes, result['OpenAckChannel']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanOpenConfirmRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-open-confirm") -@dataclass -class TxChanOpenConfirm(Cmd[TxChanOpenConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - connection_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_channel_id: ChannelId - src_channel_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.connection_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_channel_id, - "-s", self.src_channel_id] - - return args - - def process(self, result: Any) -> TxChanOpenConfirmRes: - return from_dict(TxChanOpenConfirmRes, 
result['OpenConfirmChannel']) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanCloseInitRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-close-init") -@dataclass -class TxChanCloseInit(Cmd[TxChanCloseInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_conn_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_chan_id: ChannelId - src_chan_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.dst_conn_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_chan_id, - "-s", self.src_chan_id] - - return args - - def process(self, result: Any) -> TxChanCloseInitRes: - print(result) - return from_dict(TxChanCloseConfirmRes, result['CloseInitChannel']) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxChanCloseConfirmRes: - channel_id: ChannelId - connection_id: ConnectionId - counterparty_channel_id: ChannelId - counterparty_port_id: ChannelId - height: BlockHeight - port_id: PortId - - -@cmd("tx raw chan-close-confirm") -@dataclass -class TxChanCloseConfirm(Cmd[TxChanCloseConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_conn_id: ConnectionId - dst_port_id: PortId - src_port_id: PortId - dst_chan_id: ChannelId - src_chan_id: ChannelId - - def args(self) -> List[str]: - args = [self.dst_chain_id, self.src_chain_id, - self.dst_conn_id, - self.dst_port_id, self.src_port_id, - "-d", self.dst_chan_id, - "-s", self.src_chan_id] - - return args - - def process(self, result: Any) -> TxChanCloseConfirmRes: - print(result) - return from_dict(TxChanCloseConfirmRes, result['CloseConfirmChannel']) - - -# ----------------------------------------------------------------------------- - - -@ dataclass -class Remote: - channel_id: 
ChannelId - port_id: PortId - - -@ dataclass -class ChannelEnd: - connection_hops: List[Any] - ordering: str - remote: Remote - state: str - version: str - -@ dataclass -class ChannelEnds: - chain_id: str - client_id: str - connection_id: str - channel_id: str - port_id: str - - counterparty_chain_id: str - counterparty_client_id: str - counterparty_connection_id: str - counterparty_channel_id: str - counterparty_port_id: str - - -@ cmd("query channel end") -@ dataclass -class QueryChannelEnd(Cmd[ChannelEnd]): - chain_id: ChainId - port_id: PortId - channel_id: ChannelId - - def args(self) -> List[str]: - return [self.chain_id, self.port_id, self.channel_id] - - def process(self, result: Any) -> ChannelEnd: - return from_dict(ChannelEnd, result) - -@ cmd("query channel ends") -@ dataclass -class QueryChannelEnds(Cmd[ChannelEnds]): - chain_id: ChainId - port_id: PortId - channel_id: ChannelId - - def args(self) -> List[str]: - return [self.chain_id, self.port_id, self.channel_id] - - def process(self, result: Any) -> ChannelEnds: - return from_dict(ChannelEnds, result) - -# ============================================================================= -# CHANNEL handshake -# ============================================================================= - - -def chan_open_init(c: Config, - dst: ChainId, src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId = PortId('transfer'), - src_port: PortId = PortId('transfer'), - ordering: Optional[Ordering] = None - ) -> ChannelId: - cmd = TxChanOpenInit(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - ordering=ordering) - - res = cmd.run(c).success() - l.info( - f'ChanOpenInit submitted to {dst} and obtained channel id {res.channel_id}') - return res.channel_id - - -def chan_open_try(c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - src_chan: ChannelId, - ordering: Optional[Ordering] = None - ) -> 
ChannelId: - cmd = TxChanOpenTry(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - src_channel_id=src_chan, - ordering=ordering) - - res = cmd.run(c).success() - l.info( - f'ChanOpenTry submitted to {dst} and obtained channel id {res.channel_id}') - return res.channel_id - - -def chan_open_ack(c: Config, - dst: ChainId, src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId, - ) -> ChannelId: - cmd = TxChanOpenAck(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_channel_id=dst_chan, - src_channel_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChanOpenAck submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def chan_open_confirm( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanOpenConfirm(dst_chain_id=dst, src_chain_id=src, - connection_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_channel_id=dst_chan, - src_channel_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChanOpenConfirm submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - -# ============================================================================= -# CHANNEL close -# ============================================================================= - - -def chan_close_init( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanCloseInit(dst_chain_id=dst, src_chain_id=src, - dst_conn_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_chan_id=dst_chan, - src_chan_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChannelCloseInit 
submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def chan_close_confirm( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - dst_port: PortId, - src_port: PortId, - dst_chan: ChannelId, - src_chan: ChannelId -) -> ChannelId: - cmd = TxChanCloseConfirm(dst_chain_id=dst, src_chain_id=src, - dst_conn_id=dst_conn, - dst_port_id=dst_port, src_port_id=src_port, - dst_chan_id=dst_chan, - src_chan_id=src_chan) - - res = cmd.run(c).success() - l.info( - f'ChannelCloseConfirm submitted to {dst} and got channel id {res.channel_id}') - return res.channel_id - - -def close( - c: Config, - dst: ChainId, - src: ChainId, - dst_conn: ConnectionId, - src_conn: ConnectionId, - dst_chan: ChannelId, - src_chan: ChannelId, - dst_port: PortId = PortId('transfer'), - src_port: PortId = PortId('transfer'), -): - chan_close_init(c, dst, src, dst_conn, dst_port, - src_port, dst_chan, src_chan) - - chan_close_confirm(c, src, dst, src_conn, src_port, - dst_port, src_chan, dst_chan) - - -# ============================================================================= -# CHANNEL handshake -# ============================================================================= - - -def handshake( - c: Config, - side_a: ChainId, side_b: ChainId, - conn_a: ConnectionId, conn_b: ConnectionId, - port_id: PortId -) -> Tuple[ChannelId, ChannelId]: - a_chan_id = chan_open_init(c, dst=side_a, src=side_b, dst_conn=conn_a) - - split() - - b_chan_id = chan_open_try( - c, dst=side_b, src=side_a, dst_conn=conn_b, dst_port=port_id, src_port=port_id, - src_chan=a_chan_id) - - split() - - ack_res = chan_open_ack(c, dst=side_a, src=side_b, dst_port=port_id, src_port=port_id, - dst_conn=conn_a, dst_chan=a_chan_id, src_chan=b_chan_id) - - if ack_res != a_chan_id: - l.error( - f'Incorrect channel id returned from chan open ack: expected={a_chan_id} got={ack_res}') - exit(1) - - confirm_res = chan_open_confirm( - c, dst=side_b, src=side_a, dst_port=port_id, 
src_port=port_id, - dst_conn=conn_b, dst_chan=b_chan_id, src_chan=a_chan_id) - - if confirm_res != b_chan_id: - l.error( - f'Incorrect channel id returned from chan open confirm: expected={b_chan_id} got={confirm_res}') - exit(1) - - split() - - a_chan_end = query_channel_end(c, side_a, port_id, a_chan_id) - if a_chan_end.state != 'Open': - l.error( - f'Channel end with id {a_chan_id} on chain {side_a} is not in Open state, got: {a_chan_end.state}') - exit(1) - - b_chan_end = query_channel_end(c, side_b, port_id, b_chan_id) - if b_chan_end.state != 'Open': - l.error( - f'Channel end with id {b_chan_id} on chain {side_b} is not in Open state, got: {b_chan_end.state}') - exit(1) - - a_chan_ends = query_channel_ends(c, side_a, port_id, a_chan_id) - l.debug(f'query channel ends result: {a_chan_ends}') - - assert a_chan_ends.chain_id == side_a - assert a_chan_ends.connection_id == conn_a - assert a_chan_ends.port_id == port_id - assert a_chan_ends.channel_id == a_chan_id - - assert a_chan_ends.counterparty_chain_id == side_b - assert a_chan_ends.counterparty_connection_id == conn_b - assert a_chan_ends.counterparty_port_id == port_id - assert a_chan_ends.counterparty_channel_id == b_chan_id - - b_chan_ends = query_channel_ends(c, side_b, port_id, b_chan_id) - l.debug(f'query channel ends result: {b_chan_ends}') - - assert b_chan_ends.chain_id == side_b - assert b_chan_ends.connection_id == conn_b - assert b_chan_ends.port_id == port_id - assert b_chan_ends.channel_id == b_chan_id - - assert b_chan_ends.counterparty_chain_id == side_a - assert b_chan_ends.counterparty_connection_id == conn_a - assert b_chan_ends.counterparty_port_id == port_id - assert b_chan_ends.counterparty_channel_id == a_chan_id - - return a_chan_id, b_chan_id - - -# ============================================================================= -# CHANNEL END query -# ============================================================================= - - -def query_channel_end(c: Config, chain_id: ChainId, 
port: PortId, chan_id: ChannelId) -> ChannelEnd: - cmd = QueryChannelEnd(chain_id, port, chan_id) - res = cmd.run(c).success() - - l.debug(f'Status of channel end {chan_id}: {res}') - - return res - - -# ============================================================================= -# CHANNEL ENDS query -# ============================================================================= - -def query_channel_ends(c: Config, chain_id: ChainId, port: PortId, chan_id: ChannelId) -> ChannelEnd: - cmd = QueryChannelEnds(chain_id, port, chan_id) - res = cmd.run(c).success() - - l.debug(f'Status of channel ends {chan_id}: {res}') - - return res - - -# ============================================================================= -# Passive CHANNEL relayer tests -# ============================================================================= - -def verify_state(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_chan_id: ChannelId, port_id: PortId): - - mode = toml.load(c.config_file)['mode'] - clients_enabled = mode['clients']['enabled'] - conn_enabled = mode['connections']['enabled'] - chan_enabled = mode['channels']['enabled'] - packets_enabled = mode['packets']['enabled'] - - # verify connection state on both chains, should be 'Open' or 'Init' depending on config 'mode' - if clients_enabled and conn_enabled and chan_enabled and packets_enabled: - sleep(10.0) - for i in range(20): - sleep(2.0) - ibc1_chan_end = query_channel_end(c, ibc1, port_id, ibc1_chan_id) - ibc0_chan_id = ibc1_chan_end.remote.channel_id - ibc0_chan_end = query_channel_end(c, ibc0, port_id, ibc0_chan_id) - if ibc0_chan_end.state == 'Open' and ibc1_chan_end.state == 'Open': - break - else: - assert (ibc0_chan_end.state == 'Open'), (ibc0_chan_end, "state is not Open") - assert (ibc1_chan_end.state == 'Open'), (ibc1_chan_end, "state is not Open") - - else: - sleep(5.0) - ibc1_chan_end = query_channel_end(c, ibc1, port_id, ibc1_chan_id) - assert (ibc1_chan_end.state == 'Init'), (ibc1_chan_end, "state is not 
Init") - - -def passive_channel_start_then_init(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId, port_id: PortId): - - # 1. start hermes - proc = relayer.start(c) - sleep(2.0) - - # 2. create a channel in Init state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - - # 3. wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_channel_init_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId, port_id: PortId): - - # 1. create a channel in Init state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - sleep(2.0) - - # 2. start relaying - proc = relayer.start(c) - - # 3. wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_channel_try_then_start(c: Config, - ibc1: ChainId, - ibc0: ChainId, - ibc1_conn_id: ConnectionId, - ibc0_conn_id: ConnectionId, - port_id: PortId): - - # 1. create a channel in Try state - ibc1_chan_id = chan_open_init(c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - sleep(2.0) - ibc0_chan_id = chan_open_try(c, dst=ibc0, src=ibc1, dst_conn=ibc0_conn_id, src_port=port_id, dst_port=port_id, src_chan=ibc1_chan_id) - sleep(2.0) - - # 2. start relaying - proc = relayer.start(c) - - # 3. wait for channel handshake to finish and verify channel state on both chains - verify_state(c, ibc1, ibc0, ibc1_chan_id, port_id) - - # 4. 
All good, stop the relayer - proc.kill() diff --git a/e2e/e2e/client.py b/e2e/e2e/client.py deleted file mode 100644 index fd71a5ae18..0000000000 --- a/e2e/e2e/client.py +++ /dev/null @@ -1,132 +0,0 @@ -from typing import Optional, Tuple - -from .cmd import * -from .common import * - - -@dataclass -class ClientCreated: - client_id: ClientId - client_type: ClientType - consensus_height: Height - height: BlockHeight - - -@dataclass -@cmd("tx raw create-client") -class TxCreateClient(Cmd[ClientCreated]): - dst_chain_id: ChainId - src_chain_id: ChainId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id] - - def process(self, result: Any) -> ClientCreated: - return from_dict(ClientCreated, result['CreateClient']) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class ClientUpdated: - client_id: ClientId - client_type: ClientType - consensus_height: Height - height: BlockHeight - - -@dataclass -@cmd("tx raw update-client") -class TxUpdateClient(Cmd[ClientUpdated]): - dst_chain_id: ChainId - dst_client_id: ClientId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.dst_client_id] - - def process(self, result: Any) -> ClientUpdated: - return from_dict(ClientUpdated, result[-1]['UpdateClient']['common']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class AllowUpdate: - after_expiry: bool - after_misbehaviour: bool - - -@dataclass -class ClientState: - chain_id: ChainId - frozen_height: Optional[Height] - latest_height: Height - max_clock_drift: Duration - trust_level: TrustLevel - trusting_period: Duration - unbonding_period: Duration - upgrade_path: List[str] - allow_update: AllowUpdate - - -@dataclass -@cmd("query client state") -class QueryClientState(Cmd[ClientState]): - chain_id: ChainId - client_id: ClientId - height: Optional[int] = None - proof: bool = False - - def args(self) -> List[str]: - args = [] - - if 
self.height is not None: - args.extend(['--height', str(self.height)]) - if self.proof: - args.append('--proof') - - args.extend([self.chain_id, self.client_id]) - - return args - - def process(self, result: Any) -> ClientState: - return from_dict(ClientState, result) - -# ============================================================================= -# CLIENT creation and manipulation -# ============================================================================= - - -def create_client(c: Config, dst: ChainId, src: ChainId) -> ClientCreated: - cmd = TxCreateClient(dst_chain_id=dst, src_chain_id=src) - client = cmd.run(c).success() - l.info(f'Created client: {client.client_id}') - return client - - -def update_client(c: Config, dst: ChainId, client_id: ClientId) -> ClientUpdated: - cmd = TxUpdateClient(dst_chain_id=dst, - dst_client_id=client_id) - res = cmd.run(c).success() - l.info(f'Updated client to: {res.consensus_height}') - return res - - -def query_client_state(c: Config, chain_id: ChainId, client_id: ClientId) -> Tuple[ClientId, ClientState]: - cmd = QueryClientState(chain_id, client_id) - res = cmd.run(c).success() - l.debug(f'State of client {client_id} is: {res}') - return client_id, res - - -def create_update_query_client(c: Config, dst: ChainId, src: ChainId) -> ClientId: - client = create_client(c, dst, src) - split() - query_client_state(c, dst, client.client_id) - split() - update_client(c, dst, client.client_id) - split() - query_client_state(c, dst, client.client_id) - split() - return client.client_id diff --git a/e2e/e2e/cmd.py b/e2e/e2e/cmd.py deleted file mode 100644 index 6750aab58b..0000000000 --- a/e2e/e2e/cmd.py +++ /dev/null @@ -1,106 +0,0 @@ -#!/usr/bin/env python3 - -import json -import logging as l -import subprocess -from dataclasses import dataclass, fields as datafields, is_dataclass -from pathlib import Path -from typing import Any, List, TypeVar, Generic, Type, Callable - - -@dataclass -class Config: - config_file: Path - 
relayer_cmd: str - log_level: str - max_retries: int = 10 - - -T = TypeVar('T') - - -@dataclass -class CmdResult(Generic[T]): - cmd: 'Cmd' - config: Config - result: Any - retries: int = 0 - - def success(self) -> T: - status = self.result.get('status') or 'unknown' - result = self.result.get('result') or {} - - if status == "success": - data = self.cmd.process(result) - l.debug(str(data)) - return data - elif self.retries < self.config.max_retries: - left = self.config.max_retries - self.retries - l.warn(f'Command failed: retrying (retries left: {left})') - return self.cmd.retry(self.config, self.retries).success() - else: - raise ExpectedSuccess(self.cmd, status, result) - - -class Cmd(Generic[T]): - name: str - - def process(self, result: Any) -> Any: - raise NotImplementedError("Cmd::process") - - def args(self) -> List[str]: - raise NotImplementedError("Cmd::args") - - def to_cmd(self) -> str: - return f"{self.name} {' '.join(self.args())}" - - def run(self, config: Config, retries: int = 0) -> CmdResult[T]: - full_cmd = f'{config.relayer_cmd} -c {config.config_file} --json'.split(' ') - full_cmd.extend(self.name.split(' ')) - full_cmd.extend(self.args()) - l.debug(' '.join(full_cmd)) - - res = subprocess.run(full_cmd, capture_output=True, text=True) - lines = res.stdout.splitlines() - last_line = ''.join(lines[-1:]) - l.debug(last_line) - - return CmdResult(cmd=self, config=config, retries=retries, result=json.loads(last_line)) - - def retry(self, config: Config, retries: int) -> CmdResult[T]: - return self.run(config, retries + 1) - - -C = TypeVar('C', bound=Cmd) - - -def cmd(name: str) -> Callable[[Type[C]], Type[C]]: - def decorator(klass: Type[C]) -> Type[C]: - klass.name = name - return klass - - return decorator - - -def from_dict(klass, dikt) -> Any: - if is_dataclass(klass): - fields = datafields(klass) - args = {f.name: from_dict(f.type, dikt[f.name]) for f in fields} - return klass(**args) - else: - return dikt - - -class ExpectedSuccess(Exception): 
- cmd: Any - status: str - result: Any - - def __init__(self, cmd: Any, status: str, result: Any) -> None: - self.cmd = cmd - self.status = status - self.result = result - - super().__init__( - f"Command '{cmd}' failed. Expected 'success', got '{status}'. Message: {result}" - ) diff --git a/e2e/e2e/common.py b/e2e/e2e/common.py deleted file mode 100644 index 10b96b7f51..0000000000 --- a/e2e/e2e/common.py +++ /dev/null @@ -1,44 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from time import sleep -from typing import NewType - - -@dataclass -class Height: - revision_height: int - revision_number: int - - -@dataclass -class Duration: - nanos: int - secs: int - - -@dataclass -class TrustLevel: - denominator: int - numerator: int - - -class Ordering(Enum): - UNORDERED = 'UNORDERED' - ORDERED = 'ORDERED' - - -PortId = NewType('PortId', str) -ChainId = NewType('ChainId', str) -ClientId = NewType('ClientId', str) -ChannelId = NewType('ChannelId', str) -ConnectionId = NewType('ConnectionId', str) - -Hex = NewType('Hex', str) -Sequence = NewType('Sequence', str) -Timestamp = NewType('Timestamp', int) -ClientType = NewType('ClientType', str) -BlockHeight = NewType('BlockHeight', str) - -def split(): - sleep(0.5) - print() diff --git a/e2e/e2e/connection.py b/e2e/e2e/connection.py deleted file mode 100644 index 834cfb6440..0000000000 --- a/e2e/e2e/connection.py +++ /dev/null @@ -1,346 +0,0 @@ -from typing import Tuple -import toml - -from .cmd import * -from .common import * - -import e2e.relayer as relayer - - - -@dataclass -class TxConnInitRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-init") -@dataclass -class TxConnInit(Cmd[TxConnInitRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id] - - def process(self, result: Any) -> TxConnInitRes: - return 
from_dict(TxConnInitRes, result['OpenInitConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class TxConnTryRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-try") -@dataclass -class TxConnTry(Cmd[TxConnTryRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnTryRes: - return from_dict(TxConnTryRes, result['OpenTryConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class TxConnAckRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-ack") -@dataclass -class TxConnAck(Cmd[TxConnAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - dst_conn_id: ConnectionId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-d", self.dst_conn_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnAckRes: - return from_dict(TxConnAckRes, result['OpenAckConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class TxConnConfirmRes: - connection_id: ConnectionId - - -@cmd("tx raw conn-confirm") -@dataclass -class TxConnConfirm(Cmd[TxConnConfirmRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - dst_client_id: ClientId - src_client_id: ClientId - dst_conn_id: ConnectionId - src_conn_id: ConnectionId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, - self.dst_client_id, self.src_client_id, - "-d", self.dst_conn_id, - "-s", self.src_conn_id] - - def process(self, result: Any) -> TxConnConfirmRes: - return 
from_dict(TxConnConfirmRes, result['OpenConfirmConnection']) - - -# ----------------------------------------------------------------------------- - -@dataclass -class Version: - features: List[str] - identifier: str - - -@dataclass -class Counterparty: - client_id: ClientId - connection_id: ConnectionId - prefix: str - - -@dataclass -class ConnectionEnd: - client_id: ClientId - counterparty: Counterparty - delay_period: int - state: str - versions: List[Version] - - -@cmd("query connection end") -@dataclass -class QueryConnectionEnd(Cmd[ConnectionEnd]): - chain_id: ChainId - connection_id: ConnectionId - - def args(self) -> List[str]: - return [self.chain_id, self.connection_id] - - def process(self, result: Any) -> ConnectionEnd: - return from_dict(ConnectionEnd, result) - - -# ============================================================================= -# CONNECTION handshake -# ============================================================================= - - -def conn_init(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId - ) -> ConnectionId: - cmd = TxConnInit(dst_chain_id=dst, src_chain_id=src, - dst_client_id=dst_client, src_client_id=src_client) - res = cmd.run(c).success() - l.info( - f'ConnOpen init submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_try(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - src_conn: ConnectionId - ) -> ConnectionId: - cmd = TxConnTry(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen try submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_ack(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - dst_conn: ConnectionId, src_conn: ConnectionId - ) -> ConnectionId: - cmd = 
TxConnAck(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - dst_conn_id=dst_conn, src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen ack submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def conn_confirm(c: Config, - dst: ChainId, src: ChainId, - dst_client: ClientId, src_client: ClientId, - dst_conn: ConnectionId, src_conn: ConnectionId - ) -> ConnectionId: - cmd = TxConnConfirm(dst_chain_id=dst, src_chain_id=src, dst_client_id=dst_client, src_client_id=src_client, - dst_conn_id=dst_conn, src_conn_id=src_conn) - res = cmd.run(c).success() - l.info( - f'ConnOpen confirm submitted to {dst} and obtained connection id {res.connection_id}') - return res.connection_id - - -def handshake(c: Config, - side_a: ChainId, side_b: ChainId, - client_a: ClientId, client_b: ClientId - ) -> Tuple[ConnectionId, ConnectionId]: - a_conn_id = conn_init(c, side_a, side_b, client_a, client_b) - split() - b_conn_id = conn_try(c, side_b, side_a, client_b, client_a, a_conn_id) - split() - ack_res = conn_ack( - c, side_a, side_b, client_a, client_b, a_conn_id, b_conn_id) - - if ack_res != a_conn_id: - l.error( - f'Incorrect connection id returned from conn ack: expected=({a_conn_id})/got=({ack_res})') - exit(1) - - split() - - confirm_res = conn_confirm( - c, side_b, side_a, client_b, client_a, b_conn_id, a_conn_id) - - if confirm_res != b_conn_id: - l.error( - f'Incorrect connection id returned from conn confirm: expected=({b_conn_id})/got=({confirm_res})') - exit(1) - - a_conn_end = query_connection_end(c, side_a, a_conn_id) - if a_conn_end.state != 'Open': - l.error( - f'Connection end with id {a_conn_id} is not in Open state, got: {a_conn_end.state}') - exit(1) - - b_conn_end = query_connection_end(c, side_b, b_conn_id) - if b_conn_end.state != 'Open': - l.error( - f'Connection end with id {b_conn_id} is not in Open state, got: {b_conn_end.state}') - exit(1) - - return 
a_conn_id, b_conn_id - - -# ============================================================================= -# CONNECTION END query -# ============================================================================= - - -def query_connection_end(c: Config, chain_id: ChainId, conn_id: ConnectionId) -> ConnectionEnd: - cmd = QueryConnectionEnd(chain_id, conn_id) - res = cmd.run(c).success() - - l.debug(f'Status of connection end {conn_id}: {res}') - - return res - -# ============================================================================= -# Passive CONNECTION relayer tests -# ============================================================================= - -def verify_state(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_conn_id: ConnectionId): - - mode = toml.load(c.config_file)['mode'] - clients_enabled = mode['clients']['enabled'] - conn_enabled = mode['connections']['enabled'] - chan_enabled = mode['channels']['enabled'] - packets_enabled = mode['packets']['enabled'] - - # verify connection state on both chains, should be 'Open' or 'Init' depending on config 'mode' - if clients_enabled and conn_enabled and chan_enabled and packets_enabled: - sleep(10.0) - for i in range(20): - sleep(5.0) - ibc1_conn_end = query_connection_end(c, ibc1, ibc1_conn_id) - ibc0_conn_id = ibc1_conn_end.counterparty.connection_id - ibc0_conn_end = query_connection_end(c, ibc0, ibc0_conn_id) - if ibc0_conn_end.state == 'Open' and ibc1_conn_end.state == 'Open': - break - else: - assert (ibc0_conn_end.state == 'Open'), (ibc0_conn_end, "state is not Open") - assert (ibc1_conn_end.state == 'Open'), (ibc1_conn_end, "state is not Open") - - else: - sleep(5.0) - ibc1_conn_end = query_connection_end(c, ibc1, ibc1_conn_id) - assert (ibc1_conn_end.state == 'Init'), (ibc1_conn_end, "state is not Init") - -def passive_connection_start_then_init(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId )-> ConnectionId: - - # 1. 
start hermes - proc = relayer.start(c) - sleep(2.0) - - # 2. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. All good, stop the relayer - proc.kill() - - return ibc1_conn_id_a - -def passive_connection_init_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId ): - - # 1. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 2. start hermes - proc = relayer.start(c) - sleep(10.0) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. All good, stop the relayer - proc.kill() - - -def passive_connection_try_then_start(c: Config, - ibc1: ChainId, ibc0: ChainId, - ibc1_client_id: ClientId, ibc0_client_id: ClientId ): - - # 1. create a connection in Init state - ibc1_conn_id_a = conn_init(c, dst=ibc1, src=ibc0, dst_client=ibc1_client_id, src_client=ibc0_client_id) - - # 2. create a connection in Try-Open state - ibc0_conn_id_b = conn_try(c, dst=ibc0, src=ibc1, dst_client=ibc0_client_id, src_client=ibc1_client_id, src_conn=ibc1_conn_id_a) - - # 2. start hermes - proc = relayer.start(c) - sleep(10.0) - - # 3. wait for connection handshake to finish and verify connection state on both chains - verify_state(c, ibc1, ibc0, ibc1_conn_id_a) - - # 4. 
All good, stop the relayer - proc.kill() \ No newline at end of file diff --git a/e2e/e2e/packet.py b/e2e/e2e/packet.py deleted file mode 100644 index 278aaf1f04..0000000000 --- a/e2e/e2e/packet.py +++ /dev/null @@ -1,328 +0,0 @@ -from typing import Optional - -from .cmd import * -from .common import * - - -@dataclass -class Packet: - sequence: Sequence - source_port: PortId - source_channel: ChannelId - destination_port: PortId - destination_channel: ChannelId - data: Hex - timeout_height: Height - timeout_timestamp: Timestamp - - -@dataclass -class TxPacketSendRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw ft-transfer") -@dataclass -class TxPacketSend(Cmd[TxPacketSendRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - amount: int - height_offset: int - number_msgs: Optional[int] = None - key: Optional[str] = None - - def args(self) -> List[str]: - args = [ - self.dst_chain_id, - self.src_chain_id, - self.src_port, - self.src_channel, - str(self.amount), - "-o", str(self.height_offset), - ] - - if self.number_msgs != None: - args.extend(['-n', str(self.number_msgs)]) - - if self.key != None: - args.extend(['-k', str(self.key)]) - - return args - - def process(self, result: Any) -> TxPacketSendRes: - entry = find_entry(result, 'SendPacket') - return from_dict(TxPacketSendRes, entry) - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxPacketRecvRes: - height: BlockHeight - packet: Packet - ack: Hex - - -@cmd("tx raw packet-recv") -@dataclass -class TxPacketRecv(Cmd[TxPacketRecvRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketRecvRes: - entry = find_entry(result, 'WriteAcknowledgement') - return from_dict(TxPacketRecvRes, entry) - -# 
----------------------------------------------------------------------------- - - -@dataclass -class TxPacketTimeoutRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw packet-recv") -@dataclass -class TxPacketTimeout(Cmd[TxPacketTimeoutRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketTimeoutRes: - entry = find_entry(result, 'TimeoutPacket') - return from_dict(TxPacketTimeoutRes, entry) - - -# ----------------------------------------------------------------------------- - - -@dataclass -class TxPacketAckRes: - height: BlockHeight - packet: Packet - - -@cmd("tx raw packet-ack") -@dataclass -class TxPacketAck(Cmd[TxPacketAckRes]): - dst_chain_id: ChainId - src_chain_id: ChainId - src_port: PortId - src_channel: ChannelId - - def args(self) -> List[str]: - return [self.dst_chain_id, self.src_chain_id, self.src_port, self.src_channel] - - def process(self, result: Any) -> TxPacketAckRes: - entry = find_entry(result, 'AcknowledgePacket') - return from_dict(TxPacketAckRes, entry) - - -# ----------------------------------------------------------------------------- - -@cmd("query packet unreceived-packets") -@dataclass -class QueryUnreceivedPackets(Cmd[List[int]]): - chain: ChainId - port: PortId - channel: ChannelId - - def args(self) -> List[str]: - return [self.chain, self.port, self.channel] - - def process(self, result: Any) -> List[int]: - return from_dict(List[int], result) - - -def query_unreceived_packets( - c: Config, - chain: ChainId, - port: PortId, - channel: ChannelId, -) -> List[int]: - cmd = QueryUnreceivedPackets( - chain=chain, port=port, channel=channel) - - return cmd.run(c).success() - -# ----------------------------------------------------------------------------- - - -@cmd("query packet unreceived-acks") -@dataclass -class 
QueryUnreceivedAcks(Cmd[List[int]]): - chain: ChainId - port: PortId - channel: ChannelId - - def args(self) -> List[str]: - return [self.chain, self.port, self.channel] - - def process(self, result: Any) -> List[int]: - return from_dict(List[int], result) - - -def query_unreceived_acks( - c: Config, - chain: ChainId, - port: PortId, - channel: ChannelId, -) -> List[int]: - cmd = QueryUnreceivedAcks( - chain=chain, port=port, channel=channel) - - return cmd.run(c).success() - - -# TRANSFER (packet send) -# ============================================================================= - - -def packet_send(c: Config, src: ChainId, dst: ChainId, - src_port: PortId, src_channel: ChannelId, - amount: int, height_offset: int, number_msgs: Optional[int] = None, - key: Optional[str] = 'user2') -> Packet: - - cmd = TxPacketSend(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel, - amount=amount, - number_msgs=number_msgs, - height_offset=height_offset, - key=key) - - res = cmd.run(c).success() - l.info( - f'PacketSend to {src} and obtained sequence number {res.packet.sequence}') - - return res.packet - - -def packet_recv(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketRecv(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'PacketRecv to {dst} done for sequence number {res.packet.sequence}') - - return res.packet - - -def packet_timeout(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketTimeout(dst_chain_id=dst, src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'Timeout to {src} done for sequence number {res.packet.sequence}') - - return res.packet - - -def packet_ack(c: Config, dst: ChainId, src: ChainId, src_port: PortId, src_channel: ChannelId) -> Packet: - cmd = TxPacketAck(dst_chain_id=dst, 
src_chain_id=src, - src_port=src_port, src_channel=src_channel) - - res = cmd.run(c).success() - l.info( - f'PacketAck to {dst} done for sequence number {res.packet.sequence}') - - return res.packet - - -def ping_pong(c: Config, - side_a: ChainId, side_b: ChainId, - a_chan: ChannelId, b_chan: ChannelId, - port_id: PortId = PortId('transfer')): - - pkt_send_a = packet_send(c, side_a, side_b, port_id, - a_chan, amount=9999, height_offset=1000) - - split() - - pkt_recv_b = packet_recv(c, side_b, side_a, port_id, a_chan) - - if pkt_send_a.sequence != pkt_recv_b.sequence: - l.error( - f'Mismatched sequence numbers for path {side_a} -> {side_b} : Sent={pkt_send_a.sequence} versus Received={pkt_recv_b.sequence}') - - split() - - # write the ack - pkt_ack_a = packet_ack(c, side_a, side_b, port_id, b_chan) - - if pkt_recv_b.sequence != pkt_ack_a.sequence: - l.error( - f'Mismatched sequence numbers for ack on path {side_a} -> {side_b} : Recv={pkt_recv_b.sequence} versus Ack={pkt_ack_a.sequence}') - - split() - - pkt_send_b = packet_send(c, side_b, side_a, port_id, - b_chan, amount=9999, height_offset=1000) - - split() - - pkt_recv_a = packet_recv(c, side_a, side_b, port_id, b_chan) - - if pkt_send_b.sequence != pkt_recv_a.sequence: - l.error( - f'Mismatched sequence numbers for path {side_b} -> {side_a} : Sent={pkt_send_b.sequence} versus Received={pkt_recv_a.sequence}') - - split() - - pkt_ack_b = packet_ack(c, side_b, side_a, port_id, a_chan) - - if pkt_recv_a.sequence != pkt_ack_b.sequence: - l.error( - f'Mismatched sequence numbers for ack on path {side_a} -> {side_b} : Recv={pkt_recv_a.sequence} versus Ack={pkt_ack_b.sequence}') - - -def timeout(c: Config, - side_a: ChainId, side_b: ChainId, - a_chan: ChannelId, b_chan: ChannelId, - port_id: PortId = PortId('transfer')): - - pkt_send_a = packet_send(c, side_a, side_b, port_id, - a_chan, amount=9999, height_offset=1) - - split() - - pkt_timeout_a = packet_timeout(c, side_b, side_a, port_id, a_chan) - - if 
pkt_send_a.sequence != pkt_timeout_a.sequence: - l.error( - f'Mismatched sequence numbers for path {side_a} -> {side_b} : Sent={pkt_send_a.sequence} versus Timeout={pkt_timeout_a.sequence}') - - split() - - pkt_send_b = packet_send(c, side_b, side_a, port_id, - b_chan, amount=9999, height_offset=1) - - split() - - pkt_timeout_b = packet_timeout(c, side_a, side_b, port_id, b_chan) - - if pkt_send_b.sequence != pkt_timeout_b.sequence: - l.error( - f'Mismatched sequence numbers for path {side_b} -> {side_a} : Sent={pkt_send_b.sequence} versus Timeout={pkt_timeout_b.sequence}') - - split() - - -def find_entry(result: Any, key: str) -> Any: - for entry in result: - if key in entry: - return entry[key] diff --git a/e2e/e2e/relayer.py b/e2e/e2e/relayer.py deleted file mode 100644 index 1f72d09c11..0000000000 --- a/e2e/e2e/relayer.py +++ /dev/null @@ -1,11 +0,0 @@ - -from subprocess import Popen -import logging as l - -from .cmd import Config - - -def start(c: Config) -> Popen: - full_cmd = f'{c.relayer_cmd} -c {c.config_file} -j start'.split(' ') - l.debug(' '.join(full_cmd)) - return Popen(full_cmd) diff --git a/e2e/pyrightconfig.json b/e2e/pyrightconfig.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/e2e/pyrightconfig.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/e2e/run.py b/e2e/run.py deleted file mode 100755 index 721be3f386..0000000000 --- a/e2e/run.py +++ /dev/null @@ -1,277 +0,0 @@ -#!/usr/bin/env python3 - -import argparse -import logging as l -from typing import Tuple -from pathlib import Path -import toml - -import e2e.channel as channel -import e2e.client as client -import e2e.connection as connection -import e2e.packet as packet -import e2e.relayer as relayer -from e2e.cmd import Config -from e2e.common import * - - -def passive_packets( - c: Config, - ibc0: ChainId, ibc1: ChainId, port_id: PortId, - ibc0_channel_id: ChannelId, ibc1_channel_id: ChannelId): - - # 1. 
create some unreceived acks - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 -o 1000 -n 2 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=2) - - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 -o 1000 -n 2 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=2) - sleep(5.0) - - # hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - packet.packet_recv(c, src=ibc0, dst=ibc1, - src_port=port_id, src_channel=ibc0_channel_id) - - # hermes tx raw packet-recv ibc-0 ibc-1 transfer channel-1 - packet.packet_recv(c, src=ibc1, dst=ibc0, - src_port=port_id, src_channel=ibc1_channel_id) - - # 2. create some unreceived packets - - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 -o 1000 -n 3 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=3) - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 -o 1000 -n 4 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=4) - - sleep(10.0) - - # 3. 
verify the expected number of unreceived packets and acks on each channel end - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 3), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 2), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 4), (unreceived, "unreceived packet mismatch") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 2), (unreceived, "unreceived packet mismatch") - - # 4. start relaying - it should clear the unreceived packets - proc = relayer.start(c) - - # 5. wait for the relayer to initialize and pick up pending packets - sleep(20.0) - - # 6. 
verify that there are no pending packets - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # 7. send some packets - # hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 10000 1000 -n 3 - packet.packet_send(c, src=ibc1, dst=ibc0, src_port=port_id, - src_channel=ibc1_channel_id, amount=10000, height_offset=1000, number_msgs=3) - - # hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 10000 1000 -n 4 - packet.packet_send(c, src=ibc0, dst=ibc1, src_port=port_id, - src_channel=ibc0_channel_id, amount=10000, height_offset=1000, number_msgs=4) - - sleep(20.0) - - # 8. 
verify that there are no pending packets - # hermes query packet unreceived-packets ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_packets( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-1 transfer channel-1 - unreceived = packet.query_unreceived_acks( - c, chain=ibc1, port=port_id, channel=ibc1_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # hermes query packet unreceived-packets ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_packets( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived packets mismatch (expected 0)") - - # hermes query packet unreceived-acks ibc-0 transfer channel-0 - unreceived = packet.query_unreceived_acks( - c, chain=ibc0, port=port_id, channel=ibc0_channel_id) - - assert (len(unreceived) == 0), (unreceived, - "unreceived acks mismatch (expected 0)") - - # 9.Stop the relayer - proc.kill() - - -def raw(c: Config, ibc0: ChainId, ibc1: ChainId, port_id: PortId) -> Tuple[ClientId, ConnectionId, ChannelId, ClientId, ConnectionId, ChannelId]: - ibc0_client_id = client.create_update_query_client(c, ibc0, ibc1) - - # Allocate first IDs on ibc-1 - ibc1_client_id = client.create_update_query_client(c, ibc1, ibc0) - ibc1_conn_id = connection.conn_init( - c, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - ibc1_chan_id = channel.chan_open_init( - c, dst=ibc1, src=ibc0, dst_conn=ibc1_conn_id) - - ibc1_client_id = client.create_update_query_client(c, ibc1, ibc0) - - split() - - ibc0_conn_id, ibc1_conn_id = connection.handshake( - c, ibc0, ibc1, ibc0_client_id, ibc1_client_id) - - split() - - ibc0_chan_id, ibc1_chan_id = channel.handshake( - c, ibc0, ibc1, ibc0_conn_id, ibc1_conn_id, port_id) - - split() - - packet.ping_pong(c, ibc0, ibc1, ibc0_chan_id, 
ibc1_chan_id) - - split() - - sleep(5) - - packet.timeout(c, ibc0, ibc1, ibc0_chan_id, ibc1_chan_id) - - split() - - # The ChannelCloseInit message is currently denied by Gaia, - # and requires a patch to be accepted. - # channel.close(c, ibc0, ibc1, ibc0_conn_id, - # ibc1_conn_id, ibc0_chan_id, ibc1_chan_id) - - return ibc0_client_id, ibc0_conn_id, ibc0_chan_id, ibc1_client_id, ibc1_conn_id, ibc1_chan_id - - -def main(): - parser = argparse.ArgumentParser( - description='Test all relayer commands, end-to-end') - - parser.add_argument('-c', '--config', - help='configuration file for the relayer', - metavar='CONFIG_FILE', - required=True, - type=Path) - - parser.add_argument('--cmd', - help='command to run the relayer (default: cargo run --bin hermes --)', - metavar='CMD', - default='cargo run --bin hermes --') - - parser.add_argument('--log-level', - help='minimum log level (default: debug)', - metavar='LOG', - choices=['notset', 'debug', 'info', - 'warning', 'error', 'critical'], - default='debug') - - args = parser.parse_args() - - if not args.config.exists(): - print( - f'error: supplied configuration file does not exist: {args.config}') - exit(1) - - config = Config(config_file=args.config, relayer_cmd=args.cmd, - log_level=args.log_level.upper()) - - l.basicConfig( - level=config.log_level, - format='%(asctime)s [%(levelname)8s] %(message)s', - datefmt='%Y-%m-%d %H:%M:%S') - - chains = toml.load(config.config_file)['chains'] - - ibc0 = chains[0]['id'] - ibc1 = chains[1]['id'] - port_id = PortId('transfer') - - ibc0_client_id, ibc0_conn_id, ibc0_chan_id, ibc1_client_id, ibc1_conn_id, ibc1_chan_id = raw( - config, ibc0, ibc1, port_id) - sleep(2.0) - - passive_packets(config, ibc0, ibc1, port_id, ibc0_chan_id, ibc1_chan_id) - sleep(2.0) - - connection.passive_connection_init_then_start( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - sleep(2.0) - - connection.passive_connection_start_then_init( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - 
sleep(2.0) - - connection.passive_connection_try_then_start( - config, ibc1, ibc0, ibc1_client_id, ibc0_client_id) - sleep(2.0) - - channel.passive_channel_start_then_init( - config, ibc1, ibc0, ibc1_conn_id, port_id) - sleep(2.0) - - channel.passive_channel_init_then_start( - config, ibc1, ibc0, ibc1_conn_id, port_id) - sleep(2.0) - - channel.passive_channel_try_then_start( - config, ibc1, ibc0, ibc1_conn_id, ibc0_conn_id, port_id) - sleep(2.0) - - -if __name__ == "__main__": - main() diff --git a/flake.lock b/flake.lock deleted file mode 100644 index 3c8414ec3d..0000000000 --- a/flake.lock +++ /dev/null @@ -1,801 +0,0 @@ -{ - "nodes": { - "akash-src": { - "flake": false, - "locked": { - "lastModified": 1648485085, - "narHash": "sha256-33FPy0dn6QuqneEqZYkFoCRm9agG7PE+9C/pYH9Gwx4=", - "owner": "ovrclk", - "repo": "akash", - "rev": "5b8b6bbede6c9fbb2df1ca536b8edfcf5652adf8", - "type": "github" - }, - "original": { - "owner": "ovrclk", - "ref": "v0.15.0-rc17", - "repo": "akash", - "type": "github" - } - }, - "apalache-src": { - "flake": false, - "locked": { - "lastModified": 1650241137, - "narHash": "sha256-15jzwbBc7ByxHJbpHmIukSNvih9oxTXeinNamgXirCU=", - "owner": "informalsystems", - "repo": "apalache", - "rev": "40d9ec66b3defe8e72803ca9241a73366497eeee", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v0.24.0", - "repo": "apalache", - "type": "github" - } - }, - "cosmos-nix": { - "inputs": { - "akash-src": "akash-src", - "apalache-src": "apalache-src", - "cosmos-sdk-src": "cosmos-sdk-src", - "crescent-src": "crescent-src", - "evmos-src": "evmos-src", - "flake-utils": "flake-utils", - "gaia5-src": "gaia5-src", - "gaia6-ordered-src": "gaia6-ordered-src", - "gaia6_0_2-src": "gaia6_0_2-src", - "gaia6_0_3-src": "gaia6_0_3-src", - "gaia6_0_4-src": "gaia6_0_4-src", - "gaia7-src": "gaia7-src", - "ibc-go-ics29-src": "ibc-go-ics29-src", - "ibc-go-v2-src": "ibc-go-v2-src", - "ibc-go-v3-src": "ibc-go-v3-src", - "ibc-rs-src": "ibc-rs-src", - 
"ica-src": "ica-src", - "iris-src": "iris-src", - "ixo-src": "ixo-src", - "juno-src": "juno-src", - "nixpkgs": "nixpkgs", - "osmosis-src": "osmosis-src", - "pre-commit-hooks": "pre-commit-hooks", - "regen-src": "regen-src", - "relayer-src": "relayer-src", - "rust-overlay": "rust-overlay", - "sbt-derivation": "sbt-derivation", - "sconfig-src": "sconfig-src", - "sentinel-src": "sentinel-src", - "sifchain-src": "sifchain-src", - "stargaze-src": "stargaze-src", - "stoml-src": "stoml-src", - "terra-src": "terra-src", - "ts-relayer-src": "ts-relayer-src", - "umee-src": "umee-src", - "wasmd-src": "wasmd-src", - "wasmvm_0_16_3-src": "wasmvm_0_16_3-src", - "wasmvm_1_beta7-src": "wasmvm_1_beta7-src" - }, - "locked": { - "lastModified": 1650621073, - "narHash": "sha256-/x6oEpNpPbtNU4sC3fenY/4XWmDCBTd/EU1w9h4viIk=", - "owner": "informalsystems", - "repo": "cosmos.nix", - "rev": "7cd586a42f1468c077f5d0f5d347d26312bcd6fa", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "repo": "cosmos.nix", - "type": "github" - } - }, - "cosmos-sdk-src": { - "flake": false, - "locked": { - "lastModified": 1642008757, - "narHash": "sha256-owsXBdYIf7yENDjumqyQ5AQ+jPHKxVbpQbApUpTzoxo=", - "owner": "cosmos", - "repo": "cosmos-sdk", - "rev": "c1c1ad7425292924b77dc632370815088b2d3c58", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v0.45.0-rc1", - "repo": "cosmos-sdk", - "type": "github" - } - }, - "crescent-src": { - "flake": false, - "locked": { - "lastModified": 1647869429, - "narHash": "sha256-c1xiTB/HgtQJSwD3ccFQIoSHPbJK6rf1nSjnM3r0oCE=", - "owner": "crescent-network", - "repo": "crescent", - "rev": "01980cfd06b06786109eaba78c154e6db1adc3d6", - "type": "github" - }, - "original": { - "owner": "crescent-network", - "ref": "v1.0.0-rc3", - "repo": "crescent", - "type": "github" - } - }, - "evmos-src": { - "flake": false, - "locked": { - "lastModified": 1648233712, - "narHash": "sha256-LCNGZPt6SwzN+4DHU6WcOl3ROhMOdXlIIeFJiJGYidc=", - "owner": 
"tharsis", - "repo": "evmos", - "rev": "2e886b2882d61081c9b0a6f5aa10d96cd78aff7a", - "type": "github" - }, - "original": { - "owner": "tharsis", - "ref": "v3.0.0-beta", - "repo": "evmos", - "type": "github" - } - }, - "flake-utils": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_2": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_3": { - "locked": { - "lastModified": 1637014545, - "narHash": "sha256-26IZAc5yzlD9FlDT54io1oqG/bBoyka+FJk5guaX4x4=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "bba5dcc8e0b20ab664967ad83d24d64cb64ec4f4", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "flake-utils_4": { - "locked": { - "lastModified": 1649676176, - "narHash": "sha256-OWKJratjt2RW151VUlJPRALb7OU2S5s+f0vLj4o1bHM=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "a4b154ebbdc88c8498a5c7b01589addc9e9cb678", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, - "gaia5-src": { - "flake": false, - "locked": { - "lastModified": 1634231239, - "narHash": "sha256-NfR9GRBNBlm5hB3lFea+Vlf4dkapZIZg0sZuyOX2cn8=", - "owner": "cosmos", - "repo": "gaia", - "rev": "b72cc994f7156c8a8991e6beed2dde84ad274588", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v5.0.8", - "repo": "gaia", - "type": "github" - } - }, - "gaia6-ordered-src": { - 
"flake": false, - "locked": { - "lastModified": 1648034337, - "narHash": "sha256-yw3WUCLRvn46xlWAnk6nBmvc3T91aryvBcOOfJ2ocPA=", - "owner": "informalsystems", - "repo": "gaia", - "rev": "d9e61fb98308dea2e02e8c6c6a9ab969dc240cc7", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v6.0.4-ordered", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_2-src": { - "flake": false, - "locked": { - "lastModified": 1645118548, - "narHash": "sha256-an1JVPCMcJgQYi+inx4MrAcwYjHTVFvDzw865pJc6C8=", - "owner": "cosmos", - "repo": "gaia", - "rev": "05f3795f196dd32e9233db97ed8742f8559cb483", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.2", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_3-src": { - "flake": false, - "locked": { - "lastModified": 1645184577, - "narHash": "sha256-a24C1sooMj8mVGYYV2wL7P3kM7xj/MVzfeggj186PQo=", - "owner": "cosmos", - "repo": "gaia", - "rev": "8f5dd7549fd21b99099e100da043bd8919d37ac3", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.3", - "repo": "gaia", - "type": "github" - } - }, - "gaia6_0_4-src": { - "flake": false, - "locked": { - "lastModified": 1646904235, - "narHash": "sha256-JdD0DTdMo05ggGvpHN5hugEEtGA0/WQ4bhbryDlfGXo=", - "owner": "cosmos", - "repo": "gaia", - "rev": "305668ab9d962431c79d718bb0ffdeec77a46439", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v6.0.4", - "repo": "gaia", - "type": "github" - } - }, - "gaia7-src": { - "flake": false, - "locked": { - "lastModified": 1648134734, - "narHash": "sha256-A9EqVHR2GiyuemTrjeaJWyIm6e3XUQ3nSm9dBF9gwvk=", - "owner": "cosmos", - "repo": "gaia", - "rev": "79fcf71689358b6212ae91f41070de9669421cf5", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v7.0.0", - "repo": "gaia", - "type": "github" - } - }, - "ibc-go-ics29-src": { - "flake": false, - "locked": { - "lastModified": 1647958967, - "narHash": "sha256-QZ/BQ+qnz+dmosx7/bptIoAyufeWRdT2i420p2ujqf8=", - 
"owner": "cosmos", - "repo": "ibc-go", - "rev": "ab90f07e9a776a8aafe333a25f91fa43a0e42560", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "ics29-fee-middleware", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-go-v2-src": { - "flake": false, - "locked": { - "lastModified": 1647351578, - "narHash": "sha256-n2xo3CGyO9wgIPvHgKqDfPjhhy3eHNGX6XDn707BTwk=", - "owner": "cosmos", - "repo": "ibc-go", - "rev": "bfb76858a34489d85c0404e4fdd597389229787d", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v2.2.0", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-go-v3-src": { - "flake": false, - "locked": { - "lastModified": 1647356202, - "narHash": "sha256-wX3kUzK5dkPeNgmBGP0mE8QeNR4LRo1obVGasZSLSpE=", - "owner": "cosmos", - "repo": "ibc-go", - "rev": "46e020640e66f9043c14c53a4d215a5b457d6703", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v3.0.0", - "repo": "ibc-go", - "type": "github" - } - }, - "ibc-rs-src": { - "flake": false, - "locked": { - "lastModified": 1646665771, - "narHash": "sha256-kOc+5MVzgdUvpJrGDkjcv0rmQRPImPDSrkfFW6K+q7M=", - "owner": "informalsystems", - "repo": "ibc-rs", - "rev": "aed240774df62b19099f796f2dd8459dabd72c88", - "type": "github" - }, - "original": { - "owner": "informalsystems", - "ref": "v0.13.0-rc.0", - "repo": "ibc-rs", - "type": "github" - } - }, - "ica-src": { - "flake": false, - "locked": { - "lastModified": 1647255020, - "narHash": "sha256-Ah5pivnAmk3W0fLWnrBbi84tqwJYQETSILSvNVH6fI8=", - "owner": "cosmos", - "repo": "interchain-accounts-demo", - "rev": "09b6a493a84a135f395d74d5ec82ea983617a714", - "type": "github" - }, - "original": { - "owner": "cosmos", - "repo": "interchain-accounts-demo", - "type": "github" - } - }, - "iris-src": { - "flake": false, - "locked": { - "lastModified": 1618986686, - "narHash": "sha256-1nPJOuYeGjzBYFCS0IiC5j9TJd5KVa9IL0kROks328E=", - "owner": "irisnet", - "repo": "irishub", - "rev": "53e156b2ee7eeb0b9d5b263066d0d3c88a1af736", 
- "type": "github" - }, - "original": { - "owner": "irisnet", - "ref": "v1.1.1", - "repo": "irishub", - "type": "github" - } - }, - "ixo-src": { - "flake": false, - "locked": { - "lastModified": 1645476442, - "narHash": "sha256-Ewp9UyoH6z7YGrcXVpYJveRvDq02c1mNZj2hzlOoW8s=", - "owner": "ixofoundation", - "repo": "ixo-blockchain", - "rev": "2bef5d79205057be71677837dc1174be848e13e9", - "type": "github" - }, - "original": { - "owner": "ixofoundation", - "ref": "v0.18.0-rc1", - "repo": "ixo-blockchain", - "type": "github" - } - }, - "juno-src": { - "flake": false, - "locked": { - "lastModified": 1647987514, - "narHash": "sha256-Mtiin+GOH/dyrr7cO+18er+uwYjCpQDY8xhA+kkzniM=", - "owner": "CosmosContracts", - "repo": "juno", - "rev": "9a1c32f508e6314fb73e57db35313cb329639424", - "type": "github" - }, - "original": { - "owner": "CosmosContracts", - "ref": "v2.3.0-beta.2", - "repo": "juno", - "type": "github" - } - }, - "nixpkgs": { - "locked": { - "lastModified": 1648219316, - "narHash": "sha256-Ctij+dOi0ZZIfX5eMhgwugfvB+WZSrvVNAyAuANOsnQ=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": "30d3d79b7d3607d56546dd2a6b49e156ba0ec634", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_2": { - "locked": { - "lastModified": 1651114127, - "narHash": "sha256-/lLC0wkMZkAdA5e1W76SnJzbhfOGDvync3VRHJMtAKk=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "6766fb6503ae1ebebc2a9704c162b2aef351f921", - "type": "github" - }, - "original": { - "id": "nixpkgs", - "type": "indirect" - } - }, - "nixpkgs_3": { - "locked": { - "lastModified": 1637453606, - "narHash": "sha256-Gy6cwUswft9xqsjWxFYEnx/63/qzaFUwatcbV5GF/GQ=", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "8afc4e543663ca0a6a4f496262cd05233737e732", - "type": "github" - }, - "original": { - "owner": "NixOS", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "nixpkgs_4": { - "locked": { - 
"lastModified": 1651114127, - "narHash": "sha256-/lLC0wkMZkAdA5e1W76SnJzbhfOGDvync3VRHJMtAKk=", - "owner": "nixos", - "repo": "nixpkgs", - "rev": "6766fb6503ae1ebebc2a9704c162b2aef351f921", - "type": "github" - }, - "original": { - "owner": "nixos", - "ref": "nixpkgs-unstable", - "repo": "nixpkgs", - "type": "github" - } - }, - "osmosis-src": { - "flake": false, - "locked": { - "lastModified": 1646876870, - "narHash": "sha256-cRrGlMvgZI4GK0IdSF7mtEo/+zq42gbcJEHpE74vJdc=", - "owner": "osmosis-labs", - "repo": "osmosis", - "rev": "e55ad888546b4c970c7474a26e7c60fe079ca686", - "type": "github" - }, - "original": { - "owner": "osmosis-labs", - "ref": "v7.0.4", - "repo": "osmosis", - "type": "github" - } - }, - "pre-commit-hooks": { - "inputs": { - "flake-utils": "flake-utils_2", - "nixpkgs": "nixpkgs_2" - }, - "locked": { - "lastModified": 1646153636, - "narHash": "sha256-AlWHMzK+xJ1mG267FdT8dCq/HvLCA6jwmx2ZUy5O8tY=", - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "rev": "b6bc0b21e1617e2b07d8205e7fae7224036dfa4b", - "type": "github" - }, - "original": { - "owner": "cachix", - "repo": "pre-commit-hooks.nix", - "type": "github" - } - }, - "regen-src": { - "flake": false, - "locked": { - "lastModified": 1645832054, - "narHash": "sha256-lDb0/Bw4hAX71jsCQJUju1mKYNacWEVezx6+KdIdu6Q=", - "owner": "regen-network", - "repo": "regen-ledger", - "rev": "5fb6268ed18a488ab88fb3bfa4b84e10892a7562", - "type": "github" - }, - "original": { - "owner": "regen-network", - "ref": "v3.0.0", - "repo": "regen-ledger", - "type": "github" - } - }, - "relayer-src": { - "flake": false, - "locked": { - "lastModified": 1635197290, - "narHash": "sha256-xD+xZG4Gb6557y/jkXTGdbt8qJ6izMgC4H3uo2/j5vU=", - "owner": "cosmos", - "repo": "relayer", - "rev": "7797aa103af68faa4269af586fe6df1d30e91d4a", - "type": "github" - }, - "original": { - "owner": "cosmos", - "ref": "v1.0.0", - "repo": "relayer", - "type": "github" - } - }, - "root": { - "inputs": { - "cosmos-nix": "cosmos-nix", - "flake-utils": 
"flake-utils_4", - "nixpkgs": "nixpkgs_4" - } - }, - "rust-overlay": { - "inputs": { - "flake-utils": "flake-utils_3", - "nixpkgs": "nixpkgs_3" - }, - "locked": { - "lastModified": 1645755566, - "narHash": "sha256-BwjpcywzB+4hHuStgYcOWRomI8I2PCtORUbNEL6qMBk=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "46d8d20fce510c6a25fa66f36e31f207f6ea49e4", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, - "sbt-derivation": { - "locked": { - "lastModified": 1617466857, - "narHash": "sha256-Z7eWMLreLtiSiJ3nWDWBy1w9WNEFexkYCgT/dWZF7yo=", - "owner": "zaninime", - "repo": "sbt-derivation", - "rev": "920b6f187937493371e2b1687261017e6e014cf1", - "type": "github" - }, - "original": { - "owner": "zaninime", - "repo": "sbt-derivation", - "type": "github" - } - }, - "sconfig-src": { - "flake": false, - "locked": { - "lastModified": 1594094862, - "narHash": "sha256-jR2hkR0YlPyW2nKWJl90kL80R+9psNKGPYxGg7Y/YGw=", - "owner": "freshautomations", - "repo": "sconfig", - "rev": "88043754c024aec433b3b059af170b6f555931c3", - "type": "github" - }, - "original": { - "owner": "freshautomations", - "repo": "sconfig", - "type": "github" - } - }, - "sentinel-src": { - "flake": false, - "locked": { - "lastModified": 1647195309, - "narHash": "sha256-+ZobsjLNxVL3+zi6OEFQhff6Gbd9kng8B0haqcOoiP0=", - "owner": "sentinel-official", - "repo": "hub", - "rev": "7001dc8bc4517efa33cfcc83e8b127528b5bdf2e", - "type": "github" - }, - "original": { - "owner": "sentinel-official", - "ref": "v0.9.0-rc0", - "repo": "hub", - "type": "github" - } - }, - "sifchain-src": { - "flake": false, - "locked": { - "lastModified": 1648486445, - "narHash": "sha256-n5fmWtdrc0Rhs6Uo+zjcSXmyEFVIsA5L9dlrbRXGDmU=", - "owner": "Sifchain", - "repo": "sifnode", - "rev": "269cfadf6a4c08879247c2b8373323ae7239a425", - "type": "github" - }, - "original": { - "owner": "Sifchain", - "ref": "v0.12.1", - "repo": "sifnode", - "type": "github" - } - }, - "stargaze-src": { 
- "flake": false, - "locked": { - "lastModified": 1645539964, - "narHash": "sha256-5I5pdnBJHwNaI2Soet+zH3aH+pUbYdC9TgHBjOd1TmA=", - "owner": "public-awesome", - "repo": "stargaze", - "rev": "6ee57f18714a6d94cc6205afcd1af2ab655f8f0f", - "type": "github" - }, - "original": { - "owner": "public-awesome", - "ref": "v3.0.0", - "repo": "stargaze", - "type": "github" - } - }, - "stoml-src": { - "flake": false, - "locked": { - "lastModified": 1622172633, - "narHash": "sha256-PvKkOjjWkmK90PzKcOBq0pUWLjHLjfYs9PRqqzAR7/8=", - "owner": "freshautomations", - "repo": "stoml", - "rev": "f5dab84dbf52345a1f36389aec38b02fda086a47", - "type": "github" - }, - "original": { - "owner": "freshautomations", - "repo": "stoml", - "type": "github" - } - }, - "terra-src": { - "flake": false, - "locked": { - "lastModified": 1645516218, - "narHash": "sha256-7cmVYWFLeOZJtbfw8qaVKLDMVafoeFDXOcrmrMS9buE=", - "owner": "terra-money", - "repo": "core", - "rev": "a6b93b72a7d4fabbbb85fb89e685426f5d07cac1", - "type": "github" - }, - "original": { - "owner": "terra-money", - "ref": "v0.5.17", - "repo": "core", - "type": "github" - } - }, - "ts-relayer-src": { - "flake": false, - "locked": { - "lastModified": 1640291594, - "narHash": "sha256-mSI+qgB+e9YcFrcUAgHQnbXOQ8wxO2GmD0wNe+3ya0g=", - "owner": "confio", - "repo": "ts-relayer", - "rev": "23930794ddb64afcc80ac73ffe31ca69072c6549", - "type": "github" - }, - "original": { - "owner": "confio", - "ref": "v0.4.0", - "repo": "ts-relayer", - "type": "github" - } - }, - "umee-src": { - "flake": false, - "locked": { - "lastModified": 1648176855, - "narHash": "sha256-s7MnAaM+O84JDO1uBNZm1qGN6ZfYmhXD5rCvns4u/rc=", - "owner": "umee-network", - "repo": "umee", - "rev": "3c9b8db04d6ab19d31e89df65232abc35d1a8a59", - "type": "github" - }, - "original": { - "owner": "umee-network", - "ref": "v2.0.0", - "repo": "umee", - "type": "github" - } - }, - "wasmd-src": { - "flake": false, - "locked": { - "lastModified": 1646852618, - "narHash": 
"sha256-3ifvKZhdv50E6yA8jDiVnartZZ34Ji09VJbtkkW7Lig=", - "owner": "CosmWasm", - "repo": "wasmd", - "rev": "3bc0bdeab3fa2b3f7de745622226ff36c2ec6d6a", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v0.24.0", - "repo": "wasmd", - "type": "github" - } - }, - "wasmvm_0_16_3-src": { - "flake": false, - "locked": { - "lastModified": 1640251271, - "narHash": "sha256-XvgAMDvAgzWaH7Q+mNZUBoaVhqAVlZ4ucIL0QFyNvWw=", - "owner": "CosmWasm", - "repo": "wasmvm", - "rev": "458e983721624548e66c0dcdd35140383966515e", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v0.16.3", - "repo": "wasmvm", - "type": "github" - } - }, - "wasmvm_1_beta7-src": { - "flake": false, - "locked": { - "lastModified": 1646675433, - "narHash": "sha256-tt9aAPLxtIRsG1VFM1YAIHSotuBl170EiBcHSWTtARI=", - "owner": "CosmWasm", - "repo": "wasmvm", - "rev": "f7015565a59255cd09ebfcbf9345f3c87666fedd", - "type": "github" - }, - "original": { - "owner": "CosmWasm", - "ref": "v1.0.0-beta7", - "repo": "wasmvm", - "type": "github" - } - } - }, - "root": "root", - "version": 7 -} diff --git a/flake.nix b/flake.nix deleted file mode 100644 index d8a412d92e..0000000000 --- a/flake.nix +++ /dev/null @@ -1,47 +0,0 @@ -{ - description = "Nix development dependencies for ibc-rs"; - - inputs = { - nixpkgs.url = github:nixos/nixpkgs/nixpkgs-unstable; - flake-utils.url = github:numtide/flake-utils; - cosmos-nix.url = github:informalsystems/cosmos.nix; - }; - - outputs = inputs: - let - utils = inputs.flake-utils.lib; - in - utils.eachSystem - [ - "aarch64-linux" - "aarch64-darwin" - "x86_64-darwin" - "x86_64-linux" - ] - (system: - let - nixpkgs = import inputs.nixpkgs { - inherit system; - }; - - cosmos-nix = inputs.cosmos-nix.packages.${system}; - in - { - packages = { - inherit (cosmos-nix) - gaia5 - gaia6 - gaia7 - ica - gaia6-ordered - ibc-go-v2-simapp - ibc-go-v3-simapp - apalache - ; - - python = nixpkgs.python3.withPackages (p: [ - p.toml - ]); - }; - }); -} diff --git 
a/guide/.gitignore b/guide/.gitignore deleted file mode 100644 index 7585238efe..0000000000 --- a/guide/.gitignore +++ /dev/null @@ -1 +0,0 @@ -book diff --git a/guide/README.md b/guide/README.md deleted file mode 100644 index c5aa9dd797..0000000000 --- a/guide/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# Hermes Guide - -Hermes is the name of the binary that comes packaged with -[IBC Relayer CLI](https://crates.io/crates/ibc-relayer-cli) crate. - -This directory comprises a comprehensive guide to Hermes. -In order to build and view this guide you need to install [`mdBook`] -(https://github.com/rust-lang/mdBook). -mdBook is a utility to create modern online books from Markdown files. - -This guide should be permanently deployed at its latest stable version at -[hermes.informal.systems](https://hermes.informal.systems). - -Current version: `0.15.0`. - -The version of this guide is aligned with the [versioning of the ibc crates](../README.md). - -## Local deployment - -This section describes how to deploy this guide locally on your system. - -### Pre-requisites - -Install `mdBook` using [`cargo`](https://doc.rust-lang.org/cargo/): - -```bash -cargo install mdbook -``` - -You also need to install the mdbook plug-in for [`mermaid`](https://mermaid-js.github.io/mermaid/#/) to generate graphs and diagrams, and the `mdbook-toc` plug-in for generating table of contents: - -```bash -cargo install mdbook-mermaid -cargo install mdbook-toc -``` - -### Building and viewing the guide locally - -In order to build and view the guide on your local machine, please follow this instructions. - -#### Change to the `guide` dir - -This assumes your current dir is the `ibc-rs` repository - -```bash -cd guide -``` - -#### Build the guide - -To build and view the guide in your browser, run the `mdbook` command below: - -```bash -mdbook serve -``` - -#### View the guide - -This will host the guide in your local machine. 
Open your browser and navigate to: - -```bash -http://localhost:3000 -``` - -## Adding or editing new content to the guide - -Please check the [mdBook documentation](https://rust-lang.github.io/mdBook/index.html) for additional information on how to add new content to the guide. - -Basically if you want to add new content to the guide, just add an entry to the `SUMMARY.md` Markdown file which is the TOC page. Then create a page for the entry you've added to the `SUMMARY.md` page. If you don't create the page, but save the `SUMMARY.md` file and build again, `mdBook` will create the page automatically for you. - -#### Local development -If you are adding content using your favorite IDE and have a terminal opened running `mdbook serve`, it provides a convenient watch functionality so any changes detected on local files will trigger another build and if you refresh the guide on your browser they will be shown there. - -#### Submit your changes -Once you finish adding the new content just commit your changes (`git commit`) and push them to the respository (`git push`). diff --git a/guide/book.toml b/guide/book.toml deleted file mode 100644 index c11547b50c..0000000000 --- a/guide/book.toml +++ /dev/null @@ -1,19 +0,0 @@ -[book] -authors = ["Informal Systems Inc."] -language = "en" -multilingual = false -src = "src" -title = "Hermes (IBC Relayer CLI) Documentation" - -[preprocessor.mermaid] -command = "mdbook-mermaid" - -[preprocessor.toc] -command = "mdbook-toc" -renderer = ["html"] - -[output.html] -additional-js = ["mermaid.min.js", "mermaid-init.js"] - -# Uncomment to trigger the link check -# [output.linkcheck] diff --git a/guide/mermaid-init.js b/guide/mermaid-init.js deleted file mode 100644 index 72f038e237..0000000000 --- a/guide/mermaid-init.js +++ /dev/null @@ -1,16 +0,0 @@ -// Adapt Mermaid theme to rustdoc theme. 
-// https://github.com/mersinvald/aquamarine/blob/ce24cd6e3a84e4f80a60c21e218b9c6f26b001fa/src/attrs.rs#L89-L101 - -function get_mermaid_theme() { - let is_dark = /.*(dark|coal|navy|ayu).*/.test(document.documentElement.className); - if (is_dark) { - return 'dark'; - } else { - return 'default'; - } -} - -mermaid.initialize({ - startOnLoad: true, - theme: get_mermaid_theme() -}); diff --git a/guide/mermaid.min.js b/guide/mermaid.min.js deleted file mode 100644 index 8d71a81caf..0000000000 --- a/guide/mermaid.min.js +++ /dev/null @@ -1,32 +0,0 @@ -!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.mermaid=e():t.mermaid=e()}("undefined"!=typeof self?self:this,(function(){return function(t){var e={};function n(r){if(e[r])return e[r].exports;var i=e[r]={i:r,l:!1,exports:{}};return t[r].call(i.exports,i,i.exports,n),i.l=!0,i.exports}return n.m=t,n.c=e,n.d=function(t,e,r){n.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:r})},n.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},n.t=function(t,e){if(1&e&&(t=n(t)),8&e)return t;if(4&e&&"object"==typeof t&&t&&t.__esModule)return t;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:t}),2&e&&"string"!=typeof t)for(var i in t)n.d(r,i,function(e){return t[e]}.bind(null,i));return r},n.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return n.d(e,"a",e),e},n.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},n.p="",n(n.s=383)}([function(t,e,n){"use strict";n.r(e);var r=function(t,e){return te?1:t>=e?0:NaN},i=function(t){var e;return 1===t.length&&(e=t,t=function(t,n){return r(e(t),n)}),{left:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)<0?r=a+1:i=a}return 
r},right:function(e,n,r,i){for(null==r&&(r=0),null==i&&(i=e.length);r>>1;t(e[a],n)>0?i=a:r=a+1}return r}}};var a=i(r),o=a.right,s=a.left,c=o,u=function(t,e){null==e&&(e=l);for(var n=0,r=t.length-1,i=t[0],a=new Array(r<0?0:r);nt?1:e>=t?0:NaN},d=function(t){return null===t?NaN:+t},p=function(t,e){var n,r,i=t.length,a=0,o=-1,s=0,c=0;if(null==e)for(;++o1)return c/(a-1)},g=function(t,e){var n=p(t,e);return n?Math.sqrt(n):n},y=function(t,e){var n,r,i,a=t.length,o=-1;if(null==e){for(;++o=n)for(r=i=n;++on&&(r=n),i=n)for(r=i=n;++on&&(r=n),i0)return[t];if((r=e0)for(t=Math.ceil(t/o),e=Math.floor(e/o),a=new Array(i=Math.ceil(e-t+1));++s=0?(a>=w?10:a>=E?5:a>=T?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(a>=w?10:a>=E?5:a>=T?2:1)}function A(t,e,n){var r=Math.abs(e-t)/Math.max(0,n),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),a=r/i;return a>=w?i*=10:a>=E?i*=5:a>=T&&(i*=2),eh;)f.pop(),--d;var p,g=new Array(d+1);for(i=0;i<=d;++i)(p=g[i]=[]).x0=i>0?f[i-1]:l,p.x1=i=1)return+n(t[r-1],r-1,t);var r,i=(r-1)*e,a=Math.floor(i),o=+n(t[a],a,t);return o+(+n(t[a+1],a+1,t)-o)*(i-a)}},N=function(t,e,n){return t=b.call(t,d).sort(r),Math.ceil((n-e)/(2*(D(t,.75)-D(t,.25))*Math.pow(t.length,-1/3)))},B=function(t,e,n){return Math.ceil((n-e)/(3.5*g(t)*Math.pow(t.length,-1/3)))},L=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++ar&&(r=n)}else for(;++a=n)for(r=n;++ar&&(r=n);return r},F=function(t,e){var n,r=t.length,i=r,a=-1,o=0;if(null==e)for(;++a=0;)for(e=(r=t[i]).length;--e>=0;)n[--o]=r[e];return n},j=function(t,e){var n,r,i=t.length,a=-1;if(null==e){for(;++a=n)for(r=n;++an&&(r=n)}else for(;++a=n)for(r=n;++an&&(r=n);return r},R=function(t,e){for(var n=e.length,r=new Array(n);n--;)r[n]=t[e[n]];return r},Y=function(t,e){if(n=t.length){var n,i,a=0,o=0,s=t[o];for(null==e&&(e=r);++a=0&&(n=t.slice(r+1),t=t.slice(0,r)),t&&!e.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:n}}))}function ct(t,e){for(var n,r=0,i=t.length;r0)for(var n,r,i=new 
Array(n),a=0;ae?1:t>=e?0:NaN}var _t="http://www.w3.org/1999/xhtml",kt={svg:"http://www.w3.org/2000/svg",xhtml:_t,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"},wt=function(t){var e=t+="",n=e.indexOf(":");return n>=0&&"xmlns"!==(e=t.slice(0,n))&&(t=t.slice(n+1)),kt.hasOwnProperty(e)?{space:kt[e],local:t}:t};function Et(t){return function(){this.removeAttribute(t)}}function Tt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ct(t,e){return function(){this.setAttribute(t,e)}}function St(t,e){return function(){this.setAttributeNS(t.space,t.local,e)}}function At(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttribute(t):this.setAttribute(t,n)}}function Mt(t,e){return function(){var n=e.apply(this,arguments);null==n?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,n)}}var Ot=function(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView};function Dt(t){return function(){this.style.removeProperty(t)}}function Nt(t,e,n){return function(){this.style.setProperty(t,e,n)}}function Bt(t,e,n){return function(){var r=e.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,n)}}function Lt(t,e){return t.style.getPropertyValue(e)||Ot(t).getComputedStyle(t,null).getPropertyValue(e)}function Ft(t){return function(){delete this[t]}}function Pt(t,e){return function(){this[t]=e}}function It(t,e){return function(){var n=e.apply(this,arguments);null==n?delete this[t]:this[t]=n}}function jt(t){return t.trim().split(/^|\s+/)}function Rt(t){return t.classList||new Yt(t)}function Yt(t){this._node=t,this._names=jt(t.getAttribute("class")||"")}function zt(t,e){for(var n=Rt(t),r=-1,i=e.length;++r=0&&(this._names.splice(e,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};function Ht(){this.textContent=""}function Gt(t){return 
function(){this.textContent=t}}function qt(t){return function(){var e=t.apply(this,arguments);this.textContent=null==e?"":e}}function Xt(){this.innerHTML=""}function Zt(t){return function(){this.innerHTML=t}}function Jt(t){return function(){var e=t.apply(this,arguments);this.innerHTML=null==e?"":e}}function Qt(){this.nextSibling&&this.parentNode.appendChild(this)}function Kt(){this.previousSibling&&this.parentNode.insertBefore(this,this.parentNode.firstChild)}function te(t){return function(){var e=this.ownerDocument,n=this.namespaceURI;return n===_t&&e.documentElement.namespaceURI===_t?e.createElement(t):e.createElementNS(n,t)}}function ee(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}var ne=function(t){var e=wt(t);return(e.local?ee:te)(e)};function re(){return null}function ie(){var t=this.parentNode;t&&t.removeChild(this)}function ae(){var t=this.cloneNode(!1),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}function oe(){var t=this.cloneNode(!0),e=this.parentNode;return e?e.insertBefore(t,this.nextSibling):t}var se={},ce=null;"undefined"!=typeof document&&("onmouseenter"in document.documentElement||(se={mouseenter:"mouseover",mouseleave:"mouseout"}));function ue(t,e,n){return t=le(t,e,n),function(e){var n=e.relatedTarget;n&&(n===this||8&n.compareDocumentPosition(this))||t.call(this,e)}}function le(t,e,n){return function(r){var i=ce;ce=r;try{t.call(this,this.__data__,e,n)}finally{ce=i}}}function he(t){return t.trim().split(/^|\s+/).map((function(t){var e="",n=t.indexOf(".");return n>=0&&(e=t.slice(n+1),t=t.slice(0,n)),{type:t,name:e}}))}function fe(t){return function(){var e=this.__on;if(e){for(var n,r=0,i=-1,a=e.length;r=_&&(_=x+1);!(b=v[_])&&++_=0;)(r=i[a])&&(o&&4^r.compareDocumentPosition(o)&&o.parentNode.insertBefore(r,o),o=r);return this},sort:function(t){function e(e,n){return e&&n?t(e.__data__,n.__data__):!e-!n}t||(t=xt);for(var n=this._groups,r=n.length,i=new 
Array(r),a=0;a1?this.each((null==e?Dt:"function"==typeof e?Bt:Nt)(t,e,null==n?"":n)):Lt(this.node(),t)},property:function(t,e){return arguments.length>1?this.each((null==e?Ft:"function"==typeof e?It:Pt)(t,e)):this.node()[t]},classed:function(t,e){var n=jt(t+"");if(arguments.length<2){for(var r=Rt(this.node()),i=-1,a=n.length;++i>8&15|e>>4&240,e>>4&15|240&e,(15&e)<<4|15&e,1):8===n?new qe(e>>24&255,e>>16&255,e>>8&255,(255&e)/255):4===n?new qe(e>>12&15|e>>8&240,e>>8&15|e>>4&240,e>>4&15|240&e,((15&e)<<4|15&e)/255):null):(e=Le.exec(t))?new qe(e[1],e[2],e[3],1):(e=Fe.exec(t))?new qe(255*e[1]/100,255*e[2]/100,255*e[3]/100,1):(e=Pe.exec(t))?Ve(e[1],e[2],e[3],e[4]):(e=Ie.exec(t))?Ve(255*e[1]/100,255*e[2]/100,255*e[3]/100,e[4]):(e=je.exec(t))?Qe(e[1],e[2]/100,e[3]/100,1):(e=Re.exec(t))?Qe(e[1],e[2]/100,e[3]/100,e[4]):Ye.hasOwnProperty(t)?We(Ye[t]):"transparent"===t?new qe(NaN,NaN,NaN,0):null}function We(t){return new qe(t>>16&255,t>>8&255,255&t,1)}function Ve(t,e,n,r){return r<=0&&(t=e=n=NaN),new qe(t,e,n,r)}function He(t){return t instanceof Me||(t=$e(t)),t?new qe((t=t.rgb()).r,t.g,t.b,t.opacity):new qe}function Ge(t,e,n,r){return 1===arguments.length?He(t):new qe(t,e,n,null==r?1:r)}function qe(t,e,n,r){this.r=+t,this.g=+e,this.b=+n,this.opacity=+r}function Xe(){return"#"+Je(this.r)+Je(this.g)+Je(this.b)}function Ze(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Je(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Qe(t,e,n,r){return r<=0?t=e=n=NaN:n<=0||n>=1?t=e=NaN:e<=0&&(t=NaN),new en(t,e,n,r)}function Ke(t){if(t instanceof en)return new en(t.h,t.s,t.l,t.opacity);if(t instanceof Me||(t=$e(t)),!t)return new en;if(t instanceof en)return t;var 
e=(t=t.rgb()).r/255,n=t.g/255,r=t.b/255,i=Math.min(e,n,r),a=Math.max(e,n,r),o=NaN,s=a-i,c=(a+i)/2;return s?(o=e===a?(n-r)/s+6*(n0&&c<1?0:o,new en(o,s,c,t.opacity)}function tn(t,e,n,r){return 1===arguments.length?Ke(t):new en(t,e,n,null==r?1:r)}function en(t,e,n,r){this.h=+t,this.s=+e,this.l=+n,this.opacity=+r}function nn(t,e,n){return 255*(t<60?e+(n-e)*t/60:t<180?n:t<240?e+(n-e)*(240-t)/60:e)}function rn(t,e,n,r,i){var a=t*t,o=a*t;return((1-3*t+3*a-o)*e+(4-6*a+3*o)*n+(1+3*t+3*a-3*o)*r+o*i)/6}Se(Me,$e,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:ze,formatHex:ze,formatHsl:function(){return Ke(this).formatHsl()},formatRgb:Ue,toString:Ue}),Se(qe,Ge,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new qe(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:Xe,formatHex:Xe,formatRgb:Ze,toString:Ze})),Se(en,tn,Ae(Me,{brighter:function(t){return t=null==t?1/.7:Math.pow(1/.7,t),new en(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?.7:Math.pow(.7,t),new en(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),e=isNaN(t)||isNaN(this.s)?0:this.s,n=this.l,r=n+(n<.5?n:1-n)*e,i=2*n-r;return new qe(nn(t>=240?t-240:t+120,i,r),nn(t,i,r),nn(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));var an=function(t){var e=t.length-1;return function(n){var 
r=n<=0?n=0:n>=1?(n=1,e-1):Math.floor(n*e),i=t[r],a=t[r+1],o=r>0?t[r-1]:2*i-a,s=r180||n<-180?n-360*Math.round(n/360):n):sn(isNaN(t)?e:t)}function ln(t){return 1==(t=+t)?hn:function(e,n){return n-e?function(t,e,n){return t=Math.pow(t,n),e=Math.pow(e,n)-t,n=1/n,function(r){return Math.pow(t+r*e,n)}}(e,n,t):sn(isNaN(e)?n:e)}}function hn(t,e){var n=e-t;return n?cn(t,n):sn(isNaN(t)?e:t)}var fn=function t(e){var n=ln(e);function r(t,e){var r=n((t=Ge(t)).r,(e=Ge(e)).r),i=n(t.g,e.g),a=n(t.b,e.b),o=hn(t.opacity,e.opacity);return function(e){return t.r=r(e),t.g=i(e),t.b=a(e),t.opacity=o(e),t+""}}return r.gamma=t,r}(1);function dn(t){return function(e){var n,r,i=e.length,a=new Array(i),o=new Array(i),s=new Array(i);for(n=0;na&&(i=e.slice(a,i),s[o]?s[o]+=i:s[++o]=i),(n=n[0])===(r=r[0])?s[o]?s[o]+=r:s[++o]=r:(s[++o]=null,c.push({i:o,x:_n(n,r)})),a=En.lastIndex;return a=0&&e._call.call(null,t),e=e._next;--Bn}function Hn(){In=(Pn=Rn.now())+jn,Bn=Ln=0;try{Vn()}finally{Bn=0,function(){var t,e,n=Tn,r=1/0;for(;n;)n._call?(r>n._time&&(r=n._time),t=n,n=n._next):(e=n._next,n._next=null,n=t?t._next=e:Tn=e);Cn=t,qn(r)}(),In=0}}function Gn(){var t=Rn.now(),e=t-Pn;e>1e3&&(jn-=e,Pn=t)}function qn(t){Bn||(Ln&&(Ln=clearTimeout(Ln)),t-In>24?(t<1/0&&(Ln=setTimeout(Hn,t-Rn.now()-jn)),Fn&&(Fn=clearInterval(Fn))):(Fn||(Pn=Rn.now(),Fn=setInterval(Gn,1e3)),Bn=1,Yn(Hn)))}$n.prototype=Wn.prototype={constructor:$n,restart:function(t,e,n){if("function"!=typeof t)throw new TypeError("callback is not a function");n=(null==n?zn():+n)+(null==e?0:+e),this._next||Cn===this||(Cn?Cn._next=this:Tn=this,Cn=this),this._call=t,this._time=n,qn()},stop:function(){this._call&&(this._call=null,this._time=1/0,qn())}};var Xn=function(t,e,n){var r=new $n;return e=null==e?0:+e,r.restart((function(n){r.stop(),t(n+e)}),e,n),r},Zn=lt("start","end","cancel","interrupt"),Jn=[],Qn=function(t,e,n,r,i,a){var o=t.__transition;if(o){if(n in o)return}else t.__transition={};!function(t,e,n){var r,i=t.__transition;function a(c){var 
u,l,h,f;if(1!==n.state)return s();for(u in i)if((f=i[u]).name===n.name){if(3===f.state)return Xn(a);4===f.state?(f.state=6,f.timer.stop(),f.on.call("interrupt",t,t.__data__,f.index,f.group),delete i[u]):+u0)throw new Error("too late; already scheduled");return n}function tr(t,e){var n=er(t,e);if(n.state>3)throw new Error("too late; already running");return n}function er(t,e){var n=t.__transition;if(!n||!(n=n[e]))throw new Error("transition not found");return n}var nr,rr,ir,ar,or=function(t,e){var n,r,i,a=t.__transition,o=!0;if(a){for(i in e=null==e?null:e+"",a)(n=a[i]).name===e?(r=n.state>2&&n.state<5,n.state=6,n.timer.stop(),n.on.call(r?"interrupt":"cancel",t,t.__data__,n.index,n.group),delete a[i]):o=!1;o&&delete t.__transition}},sr=180/Math.PI,cr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1},ur=function(t,e,n,r,i,a){var o,s,c;return(o=Math.sqrt(t*t+e*e))&&(t/=o,e/=o),(c=t*n+e*r)&&(n-=t*c,r-=e*c),(s=Math.sqrt(n*n+r*r))&&(n/=s,r/=s,c/=s),t*r180?e+=360:e-t>180&&(t+=360),a.push({i:n.push(i(n)+"rotate(",null,r)-2,x:_n(t,e)})):e&&n.push(i(n)+"rotate("+e+r)}(a.rotate,o.rotate,s,c),function(t,e,n,a){t!==e?a.push({i:n.push(i(n)+"skewX(",null,r)-2,x:_n(t,e)}):e&&n.push(i(n)+"skewX("+e+r)}(a.skewX,o.skewX,s,c),function(t,e,n,r,a,o){if(t!==n||e!==r){var s=a.push(i(a)+"scale(",null,",",null,")");o.push({i:s-4,x:_n(t,n)},{i:s-2,x:_n(e,r)})}else 1===n&&1===r||a.push(i(a)+"scale("+n+","+r+")")}(a.scaleX,a.scaleY,o.scaleX,o.scaleY,s,c),a=o=null,function(t){for(var e,n=-1,r=c.length;++n=0&&(t=t.slice(0,e)),!t||"start"===t}))}(e)?Kn:tr;return function(){var o=a(this,t),s=o.on;s!==r&&(i=(r=s).copy()).on(e,n),o.on=i}}var Br=_e.prototype.constructor;function Lr(t){return function(){this.style.removeProperty(t)}}function Fr(t,e,n){return function(r){this.style.setProperty(t,e.call(this,r),n)}}function Pr(t,e,n){var r,i;function a(){var a=e.apply(this,arguments);return a!==i&&(r=(i=a)&&Fr(t,a,n)),r}return a._value=e,a}function Ir(t){return 
function(e){this.textContent=t.call(this,e)}}function jr(t){var e,n;function r(){var r=t.apply(this,arguments);return r!==n&&(e=(n=r)&&Ir(r)),e}return r._value=t,r}var Rr=0;function Yr(t,e,n,r){this._groups=t,this._parents=e,this._name=n,this._id=r}function zr(t){return _e().transition(t)}function Ur(){return++Rr}var $r=_e.prototype;function Wr(t){return t*t*t}function Vr(t){return--t*t*t+1}function Hr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}Yr.prototype=zr.prototype={constructor:Yr,select:function(t){var e=this._name,n=this._id;"function"!=typeof t&&(t=ft(t));for(var r=this._groups,i=r.length,a=new Array(i),o=0;o1&&n.name===e)return new Yr([[t]],Xr,e,+r);return null},Jr=function(t){return function(){return t}},Qr=function(t,e,n){this.target=t,this.type=e,this.selection=n};function Kr(){ce.stopImmediatePropagation()}var ti=function(){ce.preventDefault(),ce.stopImmediatePropagation()},ei={name:"drag"},ni={name:"space"},ri={name:"handle"},ii={name:"center"};function ai(t){return[+t[0],+t[1]]}function oi(t){return[ai(t[0]),ai(t[1])]}function si(t){return function(e){return Dn(e,ce.touches,t)}}var ci={name:"x",handles:["w","e"].map(yi),input:function(t,e){return null==t?null:[[+t[0],e[0][1]],[+t[1],e[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},ui={name:"y",handles:["n","s"].map(yi),input:function(t,e){return null==t?null:[[e[0][0],+t[0]],[e[1][0],+t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},li={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(yi),input:function(t){return null==t?null:oi(t)},output:function(t){return t}},hi={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},fi={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},di={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},pi={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},gi={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function 
yi(t){return{type:t}}function vi(){return!ce.ctrlKey&&!ce.button}function mi(){var t=this.ownerSVGElement||this;return t.hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function bi(){return navigator.maxTouchPoints||"ontouchstart"in this}function xi(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function _i(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function ki(t){var e=t.__brush;return e?e.dim.output(e.selection):null}function wi(){return Ci(ci)}function Ei(){return Ci(ui)}var Ti=function(){return Ci(li)};function Ci(t){var e,n=mi,r=vi,i=bi,a=!0,o=lt("start","brush","end"),s=6;function c(e){var n=e.property("__brush",g).selectAll(".overlay").data([yi("overlay")]);n.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",hi.overlay).merge(n).each((function(){var t=xi(this).extent;ke(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])})),e.selectAll(".selection").data([yi("selection")]).enter().append("rect").attr("class","selection").attr("cursor",hi.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=e.selectAll(".handle").data(t.handles,(function(t){return t.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(t){return"handle handle--"+t.type})).attr("cursor",(function(t){return hi[t.type]})),e.each(u).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",f).filter(i).on("touchstart.brush",f).on("touchmove.brush",d).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function u(){var 
t=ke(this),e=xi(this).selection;e?(t.selectAll(".selection").style("display",null).attr("x",e[0][0]).attr("y",e[0][1]).attr("width",e[1][0]-e[0][0]).attr("height",e[1][1]-e[0][1]),t.selectAll(".handle").style("display",null).attr("x",(function(t){return"e"===t.type[t.type.length-1]?e[1][0]-s/2:e[0][0]-s/2})).attr("y",(function(t){return"s"===t.type[0]?e[1][1]-s/2:e[0][1]-s/2})).attr("width",(function(t){return"n"===t.type||"s"===t.type?e[1][0]-e[0][0]+s:s})).attr("height",(function(t){return"e"===t.type||"w"===t.type?e[1][1]-e[0][1]+s:s}))):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function l(t,e,n){return!n&&t.__brush.emitter||new h(t,e)}function h(t,e){this.that=t,this.args=e,this.state=t.__brush,this.active=0}function f(){if((!e||ce.touches)&&r.apply(this,arguments)){var n,i,o,s,c,h,f,d,p,g,y,v=this,m=ce.target.__data__.type,b="selection"===(a&&ce.metaKey?m="overlay":m)?ei:a&&ce.altKey?ii:ri,x=t===ui?null:pi[m],_=t===ci?null:gi[m],k=xi(v),w=k.extent,E=k.selection,T=w[0][0],C=w[0][1],S=w[1][0],A=w[1][1],M=0,O=0,D=x&&_&&a&&ce.shiftKey,N=ce.touches?si(ce.changedTouches[0].identifier):Nn,B=N(v),L=B,F=l(v,arguments,!0).beforestart();"overlay"===m?(E&&(p=!0),k.selection=E=[[n=t===ui?T:B[0],o=t===ci?C:B[1]],[c=t===ui?S:n,f=t===ci?A:o]]):(n=E[0][0],o=E[0][1],c=E[1][0],f=E[1][1]),i=n,s=o,h=c,d=f;var P=ke(v).attr("pointer-events","none"),I=P.selectAll(".overlay").attr("cursor",hi[m]);if(ce.touches)F.moved=R,F.ended=z;else{var j=ke(ce.view).on("mousemove.brush",R,!0).on("mouseup.brush",z,!0);a&&j.on("keydown.brush",U,!0).on("keyup.brush",$,!0),Te(ce.view)}Kr(),or(v),u.call(v),F.start()}function R(){var t=N(v);!D||g||y||(Math.abs(t[0]-L[0])>Math.abs(t[1]-L[1])?y=!0:g=!0),L=t,p=!0,ti(),Y()}function Y(){var t;switch(M=L[0]-B[0],O=L[1]-B[1],b){case ni:case ei:x&&(M=Math.max(T-n,Math.min(S-c,M)),i=n+M,h=c+M),_&&(O=Math.max(C-o,Math.min(A-f,O)),s=o+O,d=f+O);break;case 
ri:x<0?(M=Math.max(T-n,Math.min(S-n,M)),i=n+M,h=c):x>0&&(M=Math.max(T-c,Math.min(S-c,M)),i=n,h=c+M),_<0?(O=Math.max(C-o,Math.min(A-o,O)),s=o+O,d=f):_>0&&(O=Math.max(C-f,Math.min(A-f,O)),s=o,d=f+O);break;case ii:x&&(i=Math.max(T,Math.min(S,n-M*x)),h=Math.max(T,Math.min(S,c+M*x))),_&&(s=Math.max(C,Math.min(A,o-O*_)),d=Math.max(C,Math.min(A,f+O*_)))}h0&&(n=i-M),_<0?f=d-O:_>0&&(o=s-O),b=ni,I.attr("cursor",hi.selection),Y());break;default:return}ti()}function $(){switch(ce.keyCode){case 16:D&&(g=y=D=!1,Y());break;case 18:b===ii&&(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri,Y());break;case 32:b===ni&&(ce.altKey?(x&&(c=h-M*x,n=i+M*x),_&&(f=d-O*_,o=s+O*_),b=ii):(x<0?c=h:x>0&&(n=i),_<0?f=d:_>0&&(o=s),b=ri),I.attr("cursor",hi[m]),Y());break;default:return}ti()}}function d(){l(this,arguments).moved()}function p(){l(this,arguments).ended()}function g(){var e=this.__brush||{selection:null};return e.extent=oi(n.apply(this,arguments)),e.dim=t,e}return c.move=function(e,n){e.selection?e.on("start.brush",(function(){l(this,arguments).beforestart().start()})).on("interrupt.brush end.brush",(function(){l(this,arguments).end()})).tween("brush",(function(){var e=this,r=e.__brush,i=l(e,arguments),a=r.selection,o=t.input("function"==typeof n?n.apply(this,arguments):n,r.extent),s=An(a,o);function c(t){r.selection=1===t&&null===o?null:s(t),u.call(e),i.brush()}return null!==a&&null!==o?c:c(1)})):e.each((function(){var e=this,r=arguments,i=e.__brush,a=t.input("function"==typeof n?n.apply(e,r):n,i.extent),o=l(e,r).beforestart();or(e),i.selection=null===a?null:a,u.call(e),o.start().brush().end()}))},c.clear=function(t){c.move(t,null)},h.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(){return this.starting?(this.starting=!1,this.emit("start")):this.emit("brush"),this},brush:function(){return this.emit("brush"),this},end:function(){return 0==--this.active&&(delete 
this.state.emitter,this.emit("end")),this},emit:function(e){pe(new Qr(c,e,t.output(this.state.selection)),o.apply,o,[e,this.that,this.args])}},c.extent=function(t){return arguments.length?(n="function"==typeof t?t:Jr(oi(t)),c):n},c.filter=function(t){return arguments.length?(r="function"==typeof t?t:Jr(!!t),c):r},c.touchable=function(t){return arguments.length?(i="function"==typeof t?t:Jr(!!t),c):i},c.handleSize=function(t){return arguments.length?(s=+t,c):s},c.keyModifiers=function(t){return arguments.length?(a=!!t,c):a},c.on=function(){var t=o.on.apply(o,arguments);return t===o?c:t},c}var Si=Math.cos,Ai=Math.sin,Mi=Math.PI,Oi=Mi/2,Di=2*Mi,Ni=Math.max;function Bi(t){return function(e,n){return t(e.source.value+e.target.value,n.source.value+n.target.value)}}var Li=function(){var t=0,e=null,n=null,r=null;function i(i){var a,o,s,c,u,l,h=i.length,f=[],d=k(h),p=[],g=[],y=g.groups=new Array(h),v=new Array(h*h);for(a=0,u=-1;++u1e-6)if(Math.abs(l*s-c*u)>1e-6&&i){var f=n-a,d=r-o,p=s*s+c*c,g=f*f+d*d,y=Math.sqrt(p),v=Math.sqrt(h),m=i*Math.tan((Ii-Math.acos((p+h-g)/(2*y*v)))/2),b=m/v,x=m/y;Math.abs(b-1)>1e-6&&(this._+="L"+(t+b*u)+","+(e+b*l)),this._+="A"+i+","+i+",0,0,"+ +(l*f>u*d)+","+(this._x1=t+x*s)+","+(this._y1=e+x*c)}else this._+="L"+(this._x1=t)+","+(this._y1=e);else;},arc:function(t,e,n,r,i,a){t=+t,e=+e,a=!!a;var o=(n=+n)*Math.cos(r),s=n*Math.sin(r),c=t+o,u=e+s,l=1^a,h=a?r-i:i-r;if(n<0)throw new Error("negative radius: "+n);null===this._x1?this._+="M"+c+","+u:(Math.abs(this._x1-c)>1e-6||Math.abs(this._y1-u)>1e-6)&&(this._+="L"+c+","+u),n&&(h<0&&(h=h%ji+ji),h>Ri?this._+="A"+n+","+n+",0,1,"+l+","+(t-o)+","+(e-s)+"A"+n+","+n+",0,1,"+l+","+(this._x1=c)+","+(this._y1=u):h>1e-6&&(this._+="A"+n+","+n+",0,"+ +(h>=Ii)+","+l+","+(this._x1=t+n*Math.cos(i))+","+(this._y1=e+n*Math.sin(i))))},rect:function(t,e,n,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+e)+"h"+ +n+"v"+ +r+"h"+-n+"Z"},toString:function(){return this._}};var Ui=zi;function $i(t){return 
t.source}function Wi(t){return t.target}function Vi(t){return t.radius}function Hi(t){return t.startAngle}function Gi(t){return t.endAngle}var qi=function(){var t=$i,e=Wi,n=Vi,r=Hi,i=Gi,a=null;function o(){var o,s=Fi.call(arguments),c=t.apply(this,s),u=e.apply(this,s),l=+n.apply(this,(s[0]=c,s)),h=r.apply(this,s)-Oi,f=i.apply(this,s)-Oi,d=l*Si(h),p=l*Ai(h),g=+n.apply(this,(s[0]=u,s)),y=r.apply(this,s)-Oi,v=i.apply(this,s)-Oi;if(a||(a=o=Ui()),a.moveTo(d,p),a.arc(0,0,l,h,f),h===y&&f===v||(a.quadraticCurveTo(0,0,g*Si(y),g*Ai(y)),a.arc(0,0,g,y,v)),a.quadraticCurveTo(0,0,d,p),a.closePath(),o)return a=null,o+""||null}return o.radius=function(t){return arguments.length?(n="function"==typeof t?t:Pi(+t),o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:Pi(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:Pi(+t),o):i},o.source=function(e){return arguments.length?(t=e,o):t},o.target=function(t){return arguments.length?(e=t,o):e},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o};function Xi(){}function Zi(t,e){var n=new Xi;if(t instanceof Xi)t.each((function(t,e){n.set(e,t)}));else if(Array.isArray(t)){var r,i=-1,a=t.length;if(null==e)for(;++i=r.length)return null!=t&&n.sort(t),null!=e?e(n):n;for(var c,u,l,h=-1,f=n.length,d=r[i++],p=Ji(),g=o();++hr.length)return n;var o,s=i[a-1];return null!=e&&a>=r.length?o=n.entries():(o=[],n.each((function(e,n){o.push({key:n,values:t(e,a)})}))),null!=s?o.sort((function(t,e){return s(t.key,e.key)})):o}(a(t,0,ea,na),0)},key:function(t){return r.push(t),n},sortKeys:function(t){return i[r.length-1]=t,n},sortValues:function(e){return t=e,n},rollup:function(t){return e=t,n}}};function Ki(){return{}}function ta(t,e,n){t[e]=n}function ea(){return Ji()}function na(t,e,n){t.set(e,n)}function ra(){}var ia=Ji.prototype;function aa(t,e){var n=new ra;if(t instanceof ra)t.each((function(t){n.add(t)}));else if(t){var 
r=-1,i=t.length;if(null==e)for(;++r6/29*(6/29)*(6/29)?Math.pow(t,1/3):t/(6/29*3*(6/29))+4/29}function va(t){return t>6/29?t*t*t:6/29*3*(6/29)*(t-4/29)}function ma(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function ba(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function xa(t){if(t instanceof wa)return new wa(t.h,t.c,t.l,t.opacity);if(t instanceof ga||(t=fa(t)),0===t.a&&0===t.b)return new wa(NaN,0r!=d>r&&n<(f-u)*(r-l)/(d-l)+u&&(i=-i)}return i}function Ia(t,e,n){var r,i,a,o;return function(t,e,n){return(e[0]-t[0])*(n[1]-t[1])==(n[0]-t[0])*(e[1]-t[1])}(t,e,n)&&(i=t[r=+(t[0]===e[0])],a=n[r],o=e[r],i<=a&&a<=o||o<=a&&a<=i)}var ja=function(){},Ra=[[],[[[1,1.5],[.5,1]]],[[[1.5,1],[1,1.5]]],[[[1.5,1],[.5,1]]],[[[1,.5],[1.5,1]]],[[[1,1.5],[.5,1]],[[1,.5],[1.5,1]]],[[[1,.5],[1,1.5]]],[[[1,.5],[.5,1]]],[[[.5,1],[1,.5]]],[[[1,1.5],[1,.5]]],[[[.5,1],[1,.5]],[[1.5,1],[1,1.5]]],[[[1.5,1],[1,.5]]],[[[.5,1],[1.5,1]]],[[[1,1.5],[1.5,1]]],[[[.5,1],[1,1.5]]],[]],Ya=function(){var t=1,e=1,n=M,r=s;function i(t){var e=n(t);if(Array.isArray(e))e=e.slice().sort(Ba);else{var r=y(t),i=r[0],o=r[1];e=A(i,o,e),e=k(Math.floor(i/e)*e,Math.floor(o/e)*e,e)}return e.map((function(e){return a(t,e)}))}function a(n,i){var a=[],s=[];return function(n,r,i){var a,s,c,u,l,h,f=new Array,d=new Array;a=s=-1,u=n[0]>=r,Ra[u<<1].forEach(p);for(;++a=r,Ra[c|u<<1].forEach(p);Ra[u<<0].forEach(p);for(;++s=r,l=n[s*t]>=r,Ra[u<<1|l<<2].forEach(p);++a=r,h=l,l=n[s*t+a+1]>=r,Ra[c|u<<1|l<<2|h<<3].forEach(p);Ra[u|l<<3].forEach(p)}a=-1,l=n[s*t]>=r,Ra[l<<2].forEach(p);for(;++a=r,Ra[l<<2|h<<3].forEach(p);function p(t){var e,n,r=[t[0][0]+a,t[0][1]+s],c=[t[1][0]+a,t[1][1]+s],u=o(r),l=o(c);(e=d[u])?(n=f[l])?(delete d[e.end],delete f[n.start],e===n?(e.ring.push(c),i(e.ring)):f[e.start]=d[n.end]={start:e.start,end:n.end,ring:e.ring.concat(n.ring)}):(delete d[e.end],e.ring.push(c),d[e.end=l]=e):(e=f[l])?(n=d[u])?(delete f[e.start],delete 
d[n.end],e===n?(e.ring.push(c),i(e.ring)):f[n.start]=d[e.end]={start:n.start,end:e.end,ring:n.ring.concat(e.ring)}):(delete f[e.start],e.ring.unshift(r),f[e.start=u]=e):f[u]=d[l]={start:u,end:l,ring:[r,c]}}Ra[l<<3].forEach(p)}(n,i,(function(t){r(t,n,i),function(t){for(var e=0,n=t.length,r=t[n-1][1]*t[0][0]-t[n-1][0]*t[0][1];++e0?a.push([t]):s.push(t)})),s.forEach((function(t){for(var e,n=0,r=a.length;n0&&o0&&s0&&a>0))throw new Error("invalid size");return t=r,e=a,i},i.thresholds=function(t){return arguments.length?(n="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),i):n},i.smooth=function(t){return arguments.length?(r=t?s:ja,i):r===s},i};function za(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[s-a+o*r]),e.data[s-n+o*r]=c/Math.min(s+1,r-1+a-s,a))}function Ua(t,e,n){for(var r=t.width,i=t.height,a=1+(n<<1),o=0;o=n&&(s>=a&&(c-=t.data[o+(s-a)*r]),e.data[o+(s-n)*r]=c/Math.min(s+1,i-1+a-s,a))}function $a(t){return t[0]}function Wa(t){return t[1]}function Va(){return 1}var Ha=function(){var t=$a,e=Wa,n=Va,r=960,i=500,a=20,o=2,s=3*a,c=r+2*s>>o,u=i+2*s>>o,l=La(20);function h(r){var i=new Float32Array(c*u),h=new Float32Array(c*u);r.forEach((function(r,a,l){var h=+t(r,a,l)+s>>o,f=+e(r,a,l)+s>>o,d=+n(r,a,l);h>=0&&h=0&&f>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o),za({width:c,height:u,data:i},{width:c,height:u,data:h},a>>o),Ua({width:c,height:u,data:h},{width:c,height:u,data:i},a>>o);var d=l(i);if(!Array.isArray(d)){var p=L(i);d=A(0,p,d),(d=k(0,Math.floor(p/d)*d,d)).shift()}return Ya().thresholds(d).size([c,u])(i).map(f)}function f(t){return t.value*=Math.pow(2,-2*o),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(g)}function g(t){t[0]=t[0]*Math.pow(2,o)-s,t[1]=t[1]*Math.pow(2,o)-s}function y(){return c=r+2*(s=3*a)>>o,u=i+2*s>>o,h}return h.x=function(e){return 
arguments.length?(t="function"==typeof e?e:La(+e),h):t},h.y=function(t){return arguments.length?(e="function"==typeof t?t:La(+t),h):e},h.weight=function(t){return arguments.length?(n="function"==typeof t?t:La(+t),h):n},h.size=function(t){if(!arguments.length)return[r,i];var e=Math.ceil(t[0]),n=Math.ceil(t[1]);if(!(e>=0||e>=0))throw new Error("invalid size");return r=e,i=n,y()},h.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return o=Math.floor(Math.log(t)/Math.LN2),y()},h.thresholds=function(t){return arguments.length?(l="function"==typeof t?t:Array.isArray(t)?La(Na.call(t)):La(t),h):l},h.bandwidth=function(t){if(!arguments.length)return Math.sqrt(a*(a+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return a=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},h},Ga=function(t){return function(){return t}};function qa(t,e,n,r,i,a,o,s,c,u){this.target=t,this.type=e,this.subject=n,this.identifier=r,this.active=i,this.x=a,this.y=o,this.dx=s,this.dy=c,this._=u}function Xa(){return!ce.ctrlKey&&!ce.button}function Za(){return this.parentNode}function Ja(t){return null==t?{x:ce.x,y:ce.y}:t}function Qa(){return navigator.maxTouchPoints||"ontouchstart"in this}qa.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var Ka=function(){var t,e,n,r,i=Xa,a=Za,o=Ja,s=Qa,c={},u=lt("start","drag","end"),l=0,h=0;function f(t){t.on("mousedown.drag",d).filter(s).on("touchstart.drag",y).on("touchmove.drag",v).on("touchend.drag touchcancel.drag",m).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function d(){if(!r&&i.apply(this,arguments)){var o=b("mouse",a.apply(this,arguments),Nn,this,arguments);o&&(ke(ce.view).on("mousemove.drag",p,!0).on("mouseup.drag",g,!0),Te(ce.view),we(),n=!1,t=ce.clientX,e=ce.clientY,o("start"))}}function p(){if(Ee(),!n){var r=ce.clientX-t,i=ce.clientY-e;n=r*r+i*i>h}c.mouse("drag")}function g(){ke(ce.view).on("mousemove.drag 
mouseup.drag",null),Ce(ce.view,n),Ee(),c.mouse("end")}function y(){if(i.apply(this,arguments)){var t,e,n=ce.changedTouches,r=a.apply(this,arguments),o=n.length;for(t=0;t9999?"+"+io(e,6):io(e,4))+"-"+io(t.getUTCMonth()+1,2)+"-"+io(t.getUTCDate(),2)+(a?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"."+io(a,3)+"Z":i?"T"+io(n,2)+":"+io(r,2)+":"+io(i,2)+"Z":r||n?"T"+io(n,2)+":"+io(r,2)+"Z":"")}var oo=function(t){var e=new RegExp('["'+t+"\n\r]"),n=t.charCodeAt(0);function r(t,e){var r,i=[],a=t.length,o=0,s=0,c=a<=0,u=!1;function l(){if(c)return eo;if(u)return u=!1,to;var e,r,i=o;if(34===t.charCodeAt(i)){for(;o++=a?c=!0:10===(r=t.charCodeAt(o++))?u=!0:13===r&&(u=!0,10===t.charCodeAt(o)&&++o),t.slice(i+1,e-1).replace(/""/g,'"')}for(;o=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o,i=d,!(d=d[h=l<<1|u]))return i[h]=p,t;if(s=+t._x.call(null,d.data),c=+t._y.call(null,d.data),e===s&&n===c)return p.next=d,i?i[h]=p:t._root=p,t;do{i=i?i[h]=new Array(4):t._root=new Array(4),(u=e>=(a=(g+v)/2))?g=a:v=a,(l=n>=(o=(y+m)/2))?y=o:m=o}while((h=l<<1|u)==(f=(c>=o)<<1|s>=a));return i[f]=d,i[h]=p,t}var _s=function(t,e,n,r,i){this.node=t,this.x0=e,this.y0=n,this.x1=r,this.y1=i};function ks(t){return t[0]}function ws(t){return t[1]}function Es(t,e,n){var r=new Ts(null==e?ks:e,null==n?ws:n,NaN,NaN,NaN,NaN);return null==t?r:r.addAll(t)}function Ts(t,e,n,r,i,a){this._x=t,this._y=e,this._x0=n,this._y0=r,this._x1=i,this._y1=a,this._root=void 0}function Cs(t){for(var e={data:t.data},n=e;t=t.next;)n=n.next={data:t.data};return e}var Ss=Es.prototype=Ts.prototype;function As(t){return t.x+t.vx}function Ms(t){return t.y+t.vy}Ss.copy=function(){var t,e,n=new Ts(this._x,this._y,this._x0,this._y0,this._x1,this._y1),r=this._root;if(!r)return n;if(!r.length)return n._root=Cs(r),n;for(t=[{source:r,target:n._root=new Array(4)}];r=t.pop();)for(var i=0;i<4;++i)(e=r.source[i])&&(e.length?t.push({source:e,target:r.target[i]=new Array(4)}):r.target[i]=Cs(e));return n},Ss.add=function(t){var 
e=+this._x.call(null,t),n=+this._y.call(null,t);return xs(this.cover(e,n),e,n,t)},Ss.addAll=function(t){var e,n,r,i,a=t.length,o=new Array(a),s=new Array(a),c=1/0,u=1/0,l=-1/0,h=-1/0;for(n=0;nl&&(l=r),ih&&(h=i));if(c>l||u>h)return this;for(this.cover(c,u).cover(l,h),n=0;nt||t>=i||r>e||e>=a;)switch(s=(ef||(a=c.y0)>d||(o=c.x1)=v)<<1|t>=y)&&(c=p[p.length-1],p[p.length-1]=p[p.length-1-u],p[p.length-1-u]=c)}else{var m=t-+this._x.call(null,g.data),b=e-+this._y.call(null,g.data),x=m*m+b*b;if(x=(s=(p+y)/2))?p=s:y=s,(l=o>=(c=(g+v)/2))?g=c:v=c,e=d,!(d=d[h=l<<1|u]))return this;if(!d.length)break;(e[h+1&3]||e[h+2&3]||e[h+3&3])&&(n=e,f=h)}for(;d.data!==t;)if(r=d,!(d=d.next))return this;return(i=d.next)&&delete d.next,r?(i?r.next=i:delete r.next,this):e?(i?e[h]=i:delete e[h],(d=e[0]||e[1]||e[2]||e[3])&&d===(e[3]||e[2]||e[1]||e[0])&&!d.length&&(n?n[f]=d:this._root=d),this):(this._root=i,this)},Ss.removeAll=function(t){for(var e=0,n=t.length;ec+d||iu+d||as.index){var p=c-o.x-o.vx,g=u-o.y-o.vy,y=p*p+g*g;yt.r&&(t.r=t[e].r)}function s(){if(e){var r,i,a=e.length;for(n=new Array(a),r=0;r1?(null==n?s.remove(t):s.set(t,d(n)),e):s.get(t)},find:function(e,n,r){var i,a,o,s,c,u=0,l=t.length;for(null==r?r=1/0:r*=r,u=0;u1?(u.on(t,n),e):u.on(t)}}},js=function(){var t,e,n,r,i=ms(-30),a=1,o=1/0,s=.81;function c(r){var i,a=t.length,o=Es(t,Ls,Fs).visitAfter(l);for(n=r,i=0;i=o)){(t.data!==e||t.next)&&(0===l&&(d+=(l=bs())*l),0===h&&(d+=(h=bs())*h),d1?r[0]+r.slice(2):r,+t.slice(n+1)]},$s=function(t){return(t=Us(Math.abs(t)))?t[1]:NaN},Ws=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Vs(t){if(!(e=Ws.exec(t)))throw new Error("invalid format: "+t);var e;return new Hs({fill:e[1],align:e[2],sign:e[3],symbol:e[4],zero:e[5],width:e[6],comma:e[7],precision:e[8]&&e[8].slice(1),trim:e[9],type:e[10]})}function Hs(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}Vs.prototype=Hs.prototype,Hs.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};var Gs,qs,Xs,Zs,Js=function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")},Qs={"%":function(t,e){return(100*t).toFixed(e)},b:function(t){return Math.round(t).toString(2)},c:function(t){return t+""},d:function(t){return Math.round(t).toString(10)},e:function(t,e){return t.toExponential(e)},f:function(t,e){return t.toFixed(e)},g:function(t,e){return t.toPrecision(e)},o:function(t){return Math.round(t).toString(8)},p:function(t,e){return Js(100*t,e)},r:Js,s:function(t,e){var n=Us(t,e);if(!n)return t+"";var r=n[0],i=n[1],a=i-(Gs=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,o=r.length;return a===o?r:a>o?r+new Array(a-o+1).join("0"):a>0?r.slice(0,a)+"."+r.slice(a):"0."+new Array(1-a).join("0")+Us(t,Math.max(0,e+a-1))[0]},X:function(t){return Math.round(t).toString(16).toUpperCase()},x:function(t){return Math.round(t).toString(16)}},Ks=function(t){return t},tc=Array.prototype.map,ec=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"],nc=function(t){var e,n,r=void 0===t.grouping||void 0===t.thousands?Ks:(e=tc.call(t.grouping,Number),n=t.thousands+"",function(t,r){for(var 
i=t.length,a=[],o=0,s=e[0],c=0;i>0&&s>0&&(c+s+1>r&&(s=Math.max(1,r-c)),a.push(t.substring(i-=s,i+s)),!((c+=s+1)>r));)s=e[o=(o+1)%e.length];return a.reverse().join(n)}),i=void 0===t.currency?"":t.currency[0]+"",a=void 0===t.currency?"":t.currency[1]+"",o=void 0===t.decimal?".":t.decimal+"",s=void 0===t.numerals?Ks:function(t){return function(e){return e.replace(/[0-9]/g,(function(e){return t[+e]}))}}(tc.call(t.numerals,String)),c=void 0===t.percent?"%":t.percent+"",u=void 0===t.minus?"-":t.minus+"",l=void 0===t.nan?"NaN":t.nan+"";function h(t){var e=(t=Vs(t)).fill,n=t.align,h=t.sign,f=t.symbol,d=t.zero,p=t.width,g=t.comma,y=t.precision,v=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Qs[m]||(void 0===y&&(y=12),v=!0,m="g"),(d||"0"===e&&"="===n)&&(d=!0,e="0",n="=");var b="$"===f?i:"#"===f&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",x="$"===f?a:/[%p]/.test(m)?c:"",_=Qs[m],k=/[defgprs%]/.test(m);function w(t){var i,a,c,f=b,w=x;if("c"===m)w=_(t)+w,t="";else{var E=(t=+t)<0;if(t=isNaN(t)?l:_(Math.abs(t),y),v&&(t=function(t){t:for(var e,n=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(e+1):t}(t)),E&&0==+t&&(E=!1),f=(E?"("===h?h:u:"-"===h||"("===h?"":h)+f,w=("s"===m?ec[8+Gs/3]:"")+w+(E&&"("===h?")":""),k)for(i=-1,a=t.length;++i(c=t.charCodeAt(i))||c>57){w=(46===c?o+t.slice(i+1):t.slice(i))+w,t=t.slice(0,i);break}}g&&!d&&(t=r(t,1/0));var T=f.length+t.length+w.length,C=T>1)+f+t+w+C.slice(T);break;default:t=C+f+t+w}return s(t)}return y=void 0===y?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,y)):Math.max(0,Math.min(20,y)),w.toString=function(){return t+""},w}return{format:h,formatPrefix:function(t,e){var n=h(((t=Vs(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor($s(e)/3))),i=Math.pow(10,-r),a=ec[8+r/3];return function(t){return n(i*t)+a}}}};function rc(t){return qs=nc(t),Xs=qs.format,Zs=qs.formatPrefix,qs}rc({decimal:".",thousands:",",grouping:[3],currency:["$",""],minus:"-"});var ic=function(t){return Math.max(0,-$s(Math.abs(t)))},ac=function(t,e){return 
Math.max(0,3*Math.max(-8,Math.min(8,Math.floor($s(e)/3)))-$s(Math.abs(t)))},oc=function(t,e){return t=Math.abs(t),e=Math.abs(e)-t,Math.max(0,$s(e)-$s(t))+1},sc=function(){return new cc};function cc(){this.reset()}cc.prototype={constructor:cc,reset:function(){this.s=this.t=0},add:function(t){lc(uc,t,this.t),lc(this,uc.s,this.s),this.s?this.t+=uc.t:this.s=uc.t},valueOf:function(){return this.s}};var uc=new cc;function lc(t,e,n){var r=t.s=e+n,i=r-e,a=r-i;t.t=e-a+(n-i)}var hc=Math.PI,fc=hc/2,dc=hc/4,pc=2*hc,gc=180/hc,yc=hc/180,vc=Math.abs,mc=Math.atan,bc=Math.atan2,xc=Math.cos,_c=Math.ceil,kc=Math.exp,wc=(Math.floor,Math.log),Ec=Math.pow,Tc=Math.sin,Cc=Math.sign||function(t){return t>0?1:t<0?-1:0},Sc=Math.sqrt,Ac=Math.tan;function Mc(t){return t>1?0:t<-1?hc:Math.acos(t)}function Oc(t){return t>1?fc:t<-1?-fc:Math.asin(t)}function Dc(t){return(t=Tc(t/2))*t}function Nc(){}function Bc(t,e){t&&Fc.hasOwnProperty(t.type)&&Fc[t.type](t,e)}var Lc={Feature:function(t,e){Bc(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r=0?1:-1,i=r*n,a=xc(e=(e*=yc)/2+dc),o=Tc(e),s=Uc*o,c=zc*a+s*xc(i),u=s*r*Tc(i);Wc.add(bc(u,c)),Yc=t,zc=a,Uc=o}var Jc=function(t){return Vc.reset(),$c(t,Hc),2*Vc};function Qc(t){return[bc(t[1],t[0]),Oc(t[2])]}function Kc(t){var e=t[0],n=t[1],r=xc(n);return[r*xc(e),r*Tc(e),Tc(n)]}function tu(t,e){return t[0]*e[0]+t[1]*e[1]+t[2]*e[2]}function eu(t,e){return[t[1]*e[2]-t[2]*e[1],t[2]*e[0]-t[0]*e[2],t[0]*e[1]-t[1]*e[0]]}function nu(t,e){t[0]+=e[0],t[1]+=e[1],t[2]+=e[2]}function ru(t,e){return[t[0]*e,t[1]*e,t[2]*e]}function iu(t){var e=Sc(t[0]*t[0]+t[1]*t[1]+t[2]*t[2]);t[0]/=e,t[1]/=e,t[2]/=e}var 
au,ou,su,cu,uu,lu,hu,fu,du,pu,gu=sc(),yu={point:vu,lineStart:bu,lineEnd:xu,polygonStart:function(){yu.point=_u,yu.lineStart=ku,yu.lineEnd=wu,gu.reset(),Hc.polygonStart()},polygonEnd:function(){Hc.polygonEnd(),yu.point=vu,yu.lineStart=bu,yu.lineEnd=xu,Wc<0?(au=-(su=180),ou=-(cu=90)):gu>1e-6?cu=90:gu<-1e-6&&(ou=-90),pu[0]=au,pu[1]=su},sphere:function(){au=-(su=180),ou=-(cu=90)}};function vu(t,e){du.push(pu=[au=t,su=t]),ecu&&(cu=e)}function mu(t,e){var n=Kc([t*yc,e*yc]);if(fu){var r=eu(fu,n),i=eu([r[1],-r[0],0],r);iu(i),i=Qc(i);var a,o=t-uu,s=o>0?1:-1,c=i[0]*gc*s,u=vc(o)>180;u^(s*uucu&&(cu=a):u^(s*uu<(c=(c+360)%360-180)&&ccu&&(cu=e)),u?tEu(au,su)&&(su=t):Eu(t,su)>Eu(au,su)&&(au=t):su>=au?(tsu&&(su=t)):t>uu?Eu(au,t)>Eu(au,su)&&(su=t):Eu(t,su)>Eu(au,su)&&(au=t)}else du.push(pu=[au=t,su=t]);ecu&&(cu=e),fu=n,uu=t}function bu(){yu.point=mu}function xu(){pu[0]=au,pu[1]=su,yu.point=vu,fu=null}function _u(t,e){if(fu){var n=t-uu;gu.add(vc(n)>180?n+(n>0?360:-360):n)}else lu=t,hu=e;Hc.point(t,e),mu(t,e)}function ku(){Hc.lineStart()}function wu(){_u(lu,hu),Hc.lineEnd(),vc(gu)>1e-6&&(au=-(su=180)),pu[0]=au,pu[1]=su,fu=null}function Eu(t,e){return(e-=t)<0?e+360:e}function Tu(t,e){return t[0]-e[0]}function Cu(t,e){return t[0]<=t[1]?t[0]<=e&&e<=t[1]:eEu(r[0],r[1])&&(r[1]=i[1]),Eu(i[0],r[1])>Eu(r[0],r[1])&&(r[0]=i[0])):a.push(r=i);for(o=-1/0,e=0,r=a[n=a.length-1];e<=n;r=i,++e)i=a[e],(s=Eu(r[1],i[0]))>o&&(o=s,au=i[0],su=r[1])}return du=pu=null,au===1/0||ou===1/0?[[NaN,NaN],[NaN,NaN]]:[[au,ou],[su,cu]]},Wu={sphere:Nc,point:Vu,lineStart:Gu,lineEnd:Zu,polygonStart:function(){Wu.lineStart=Ju,Wu.lineEnd=Qu},polygonEnd:function(){Wu.lineStart=Gu,Wu.lineEnd=Zu}};function Vu(t,e){t*=yc;var n=xc(e*=yc);Hu(n*xc(t),n*Tc(t),Tc(e))}function Hu(t,e,n){++Su,Mu+=(t-Mu)/Su,Ou+=(e-Ou)/Su,Du+=(n-Du)/Su}function Gu(){Wu.point=qu}function qu(t,e){t*=yc;var n=xc(e*=yc);Yu=n*xc(t),zu=n*Tc(t),Uu=Tc(e),Wu.point=Xu,Hu(Yu,zu,Uu)}function Xu(t,e){t*=yc;var 
n=xc(e*=yc),r=n*xc(t),i=n*Tc(t),a=Tc(e),o=bc(Sc((o=zu*a-Uu*i)*o+(o=Uu*r-Yu*a)*o+(o=Yu*i-zu*r)*o),Yu*r+zu*i+Uu*a);Au+=o,Nu+=o*(Yu+(Yu=r)),Bu+=o*(zu+(zu=i)),Lu+=o*(Uu+(Uu=a)),Hu(Yu,zu,Uu)}function Zu(){Wu.point=Vu}function Ju(){Wu.point=Ku}function Qu(){tl(ju,Ru),Wu.point=Vu}function Ku(t,e){ju=t,Ru=e,t*=yc,e*=yc,Wu.point=tl;var n=xc(e);Yu=n*xc(t),zu=n*Tc(t),Uu=Tc(e),Hu(Yu,zu,Uu)}function tl(t,e){t*=yc;var n=xc(e*=yc),r=n*xc(t),i=n*Tc(t),a=Tc(e),o=zu*a-Uu*i,s=Uu*r-Yu*a,c=Yu*i-zu*r,u=Sc(o*o+s*s+c*c),l=Oc(u),h=u&&-l/u;Fu+=h*o,Pu+=h*s,Iu+=h*c,Au+=l,Nu+=l*(Yu+(Yu=r)),Bu+=l*(zu+(zu=i)),Lu+=l*(Uu+(Uu=a)),Hu(Yu,zu,Uu)}var el=function(t){Su=Au=Mu=Ou=Du=Nu=Bu=Lu=Fu=Pu=Iu=0,$c(t,Wu);var e=Fu,n=Pu,r=Iu,i=e*e+n*n+r*r;return i<1e-12&&(e=Nu,n=Bu,r=Lu,Au<1e-6&&(e=Mu,n=Ou,r=Du),(i=e*e+n*n+r*r)<1e-12)?[NaN,NaN]:[bc(n,e)*gc,Oc(r/Sc(i))*gc]},nl=function(t){return function(){return t}},rl=function(t,e){function n(n,r){return n=t(n,r),e(n[0],n[1])}return t.invert&&e.invert&&(n.invert=function(n,r){return(n=e.invert(n,r))&&t.invert(n[0],n[1])}),n};function il(t,e){return[vc(t)>hc?t+Math.round(-t/pc)*pc:t,e]}function al(t,e,n){return(t%=pc)?e||n?rl(sl(t),cl(e,n)):sl(t):e||n?cl(e,n):il}function ol(t){return function(e,n){return[(e+=t)>hc?e-pc:e<-hc?e+pc:e,n]}}function sl(t){var e=ol(t);return e.invert=ol(-t),e}function cl(t,e){var n=xc(t),r=Tc(t),i=xc(e),a=Tc(e);function o(t,e){var o=xc(e),s=xc(t)*o,c=Tc(t)*o,u=Tc(e),l=u*n+s*r;return[bc(c*i-l*a,s*n-u*r),Oc(l*i+c*a)]}return o.invert=function(t,e){var o=xc(e),s=xc(t)*o,c=Tc(t)*o,u=Tc(e),l=u*i-c*a;return[bc(c*i+u*a,s*n+l*r),Oc(l*n-s*r)]},o}il.invert=il;var ul=function(t){function e(e){return(e=t(e[0]*yc,e[1]*yc))[0]*=gc,e[1]*=gc,e}return t=al(t[0]*yc,t[1]*yc,t.length>2?t[2]*yc:0),e.invert=function(e){return(e=t.invert(e[0]*yc,e[1]*yc))[0]*=gc,e[1]*=gc,e},e};function ll(t,e,n,r,i,a){if(n){var o=xc(e),s=Tc(e),c=r*n;null==i?(i=e+r*pc,a=e-c/2):(i=hl(o,i),a=hl(o,a),(r>0?ia)&&(i+=r*pc));for(var 
u,l=i;r>0?l>a:l1&&e.push(e.pop().concat(e.shift()))},result:function(){var n=e;return e=[],t=null,n}}},pl=function(t,e){return vc(t[0]-e[0])<1e-6&&vc(t[1]-e[1])<1e-6};function gl(t,e,n,r){this.x=t,this.z=e,this.o=n,this.e=r,this.v=!1,this.n=this.p=null}var yl=function(t,e,n,r,i){var a,o,s=[],c=[];if(t.forEach((function(t){if(!((e=t.length-1)<=0)){var e,n,r=t[0],o=t[e];if(pl(r,o)){for(i.lineStart(),a=0;a=0;--a)i.point((l=u[a])[0],l[1]);else r(f.x,f.p.x,-1,i);f=f.p}u=(f=f.o).z,d=!d}while(!f.v);i.lineEnd()}}};function vl(t){if(e=t.length){for(var e,n,r=0,i=t[0];++r=0?1:-1,T=E*w,C=T>hc,S=g*_;if(ml.add(bc(S*E*Tc(T),y*k+S*xc(T))),o+=C?w+E*pc:w,C^d>=n^b>=n){var A=eu(Kc(f),Kc(m));iu(A);var M=eu(a,A);iu(M);var O=(C^w>=0?-1:1)*Oc(M[2]);(r>O||r===O&&(A[0]||A[1]))&&(s+=C^w>=0?1:-1)}}return(o<-1e-6||o<1e-6&&ml<-1e-6)^1&s},_l=function(t,e,n,r){return function(i){var a,o,s,c=e(i),u=dl(),l=e(u),h=!1,f={point:d,lineStart:g,lineEnd:y,polygonStart:function(){f.point=v,f.lineStart=m,f.lineEnd=b,o=[],a=[]},polygonEnd:function(){f.point=d,f.lineStart=g,f.lineEnd=y,o=I(o);var t=xl(a,r);o.length?(h||(i.polygonStart(),h=!0),yl(o,wl,t,n,i)):t&&(h||(i.polygonStart(),h=!0),i.lineStart(),n(null,null,1,i),i.lineEnd()),h&&(i.polygonEnd(),h=!1),o=a=null},sphere:function(){i.polygonStart(),i.lineStart(),n(null,null,1,i),i.lineEnd(),i.polygonEnd()}};function d(e,n){t(e,n)&&i.point(e,n)}function p(t,e){c.point(t,e)}function g(){f.point=p,c.lineStart()}function y(){f.point=d,c.lineEnd()}function v(t,e){s.push([t,e]),l.point(t,e)}function m(){l.lineStart(),s=[]}function b(){v(s[0][0],s[0][1]),l.lineEnd();var t,e,n,r,c=l.clean(),f=u.result(),d=f.length;if(s.pop(),a.push(s),s=null,d)if(1&c){if((e=(n=f[0]).length-1)>0){for(h||(i.polygonStart(),h=!0),i.lineStart(),t=0;t1&&2&c&&f.push(f.pop().concat(f.shift())),o.push(f.filter(kl))}return f}};function kl(t){return t.length>1}function wl(t,e){return((t=t.x)[0]<0?t[1]-fc-1e-6:fc-t[1])-((e=e.x)[0]<0?e[1]-fc-1e-6:fc-e[1])}var 
El=_l((function(){return!0}),(function(t){var e,n=NaN,r=NaN,i=NaN;return{lineStart:function(){t.lineStart(),e=1},point:function(a,o){var s=a>0?hc:-hc,c=vc(a-n);vc(c-hc)<1e-6?(t.point(n,r=(r+o)/2>0?fc:-fc),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),t.point(a,r),e=0):i!==s&&c>=hc&&(vc(n-i)<1e-6&&(n-=1e-6*i),vc(a-s)<1e-6&&(a-=1e-6*s),r=function(t,e,n,r){var i,a,o=Tc(t-n);return vc(o)>1e-6?mc((Tc(e)*(a=xc(r))*Tc(n)-Tc(r)*(i=xc(e))*Tc(t))/(i*a*o)):(e+r)/2}(n,r,a,o),t.point(i,r),t.lineEnd(),t.lineStart(),t.point(s,r),e=0),t.point(n=a,r=o),i=s},lineEnd:function(){t.lineEnd(),n=r=NaN},clean:function(){return 2-e}}}),(function(t,e,n,r){var i;if(null==t)i=n*fc,r.point(-hc,i),r.point(0,i),r.point(hc,i),r.point(hc,0),r.point(hc,-i),r.point(0,-i),r.point(-hc,-i),r.point(-hc,0),r.point(-hc,i);else if(vc(t[0]-e[0])>1e-6){var a=t[0]0,i=vc(e)>1e-6;function a(t,n){return xc(t)*xc(n)>e}function o(t,n,r){var i=[1,0,0],a=eu(Kc(t),Kc(n)),o=tu(a,a),s=a[0],c=o-s*s;if(!c)return!r&&t;var u=e*o/c,l=-e*s/c,h=eu(i,a),f=ru(i,u);nu(f,ru(a,l));var d=h,p=tu(f,d),g=tu(d,d),y=p*p-g*(tu(f,f)-1);if(!(y<0)){var v=Sc(y),m=ru(d,(-p-v)/g);if(nu(m,f),m=Qc(m),!r)return m;var b,x=t[0],_=n[0],k=t[1],w=n[1];_0^m[1]<(vc(m[0]-x)<1e-6?k:w):k<=m[1]&&m[1]<=w:E>hc^(x<=m[0]&&m[0]<=_)){var C=ru(d,(-p+v)/g);return nu(C,f),[m,Qc(C)]}}}function s(e,n){var i=r?t:hc-t,a=0;return e<-i?a|=1:e>i&&(a|=2),n<-i?a|=4:n>i&&(a|=8),a}return _l(a,(function(t){var e,n,c,u,l;return{lineStart:function(){u=c=!1,l=1},point:function(h,f){var d,p=[h,f],g=a(h,f),y=r?g?0:s(h,f):g?s(h+(h<0?hc:-hc),f):0;if(!e&&(u=c=g)&&t.lineStart(),g!==c&&(!(d=o(e,p))||pl(e,d)||pl(p,d))&&(p[0]+=1e-6,p[1]+=1e-6,g=a(p[0],p[1])),g!==c)l=0,g?(t.lineStart(),d=o(p,e),t.point(d[0],d[1])):(d=o(e,p),t.point(d[0],d[1]),t.lineEnd()),e=d;else if(i&&e&&r^g){var 
v;y&n||!(v=o(p,e,!0))||(l=0,r?(t.lineStart(),t.point(v[0][0],v[0][1]),t.point(v[1][0],v[1][1]),t.lineEnd()):(t.point(v[1][0],v[1][1]),t.lineEnd(),t.lineStart(),t.point(v[0][0],v[0][1])))}!g||e&&pl(e,p)||t.point(p[0],p[1]),e=p,c=g,n=y},lineEnd:function(){c&&t.lineEnd(),e=null},clean:function(){return l|(u&&c)<<1}}}),(function(e,r,i,a){ll(a,t,n,i,e,r)}),r?[0,-t]:[-hc,t-hc])};function Cl(t,e,n,r){function i(i,a){return t<=i&&i<=n&&e<=a&&a<=r}function a(i,a,s,u){var l=0,h=0;if(null==i||(l=o(i,s))!==(h=o(a,s))||c(i,a)<0^s>0)do{u.point(0===l||3===l?t:n,l>1?r:e)}while((l=(l+s+4)%4)!==h);else u.point(a[0],a[1])}function o(r,i){return vc(r[0]-t)<1e-6?i>0?0:3:vc(r[0]-n)<1e-6?i>0?2:1:vc(r[1]-e)<1e-6?i>0?1:0:i>0?3:2}function s(t,e){return c(t.x,e.x)}function c(t,e){var n=o(t,1),r=o(e,1);return n!==r?n-r:0===n?e[1]-t[1]:1===n?t[0]-e[0]:2===n?t[1]-e[1]:e[0]-t[0]}return function(o){var c,u,l,h,f,d,p,g,y,v,m,b=o,x=dl(),_={point:k,lineStart:function(){_.point=w,u&&u.push(l=[]);v=!0,y=!1,p=g=NaN},lineEnd:function(){c&&(w(h,f),d&&y&&x.rejoin(),c.push(x.result()));_.point=k,y&&b.lineEnd()},polygonStart:function(){b=x,c=[],u=[],m=!0},polygonEnd:function(){var e=function(){for(var e=0,n=0,i=u.length;nr&&(f-a)*(r-o)>(d-o)*(t-a)&&++e:d<=r&&(f-a)*(r-o)<(d-o)*(t-a)&&--e;return e}(),n=m&&e,i=(c=I(c)).length;(n||i)&&(o.polygonStart(),n&&(o.lineStart(),a(null,null,1,o),o.lineEnd()),i&&yl(c,s,e,a,o),o.polygonEnd());b=o,c=u=l=null}};function k(t,e){i(t,e)&&b.point(t,e)}function w(a,o){var s=i(a,o);if(u&&l.push([a,o]),v)h=a,f=o,d=s,v=!1,s&&(b.lineStart(),b.point(a,o));else if(s&&y)b.point(a,o);else{var c=[p=Math.max(-1e9,Math.min(1e9,p)),g=Math.max(-1e9,Math.min(1e9,g))],x=[a=Math.max(-1e9,Math.min(1e9,a)),o=Math.max(-1e9,Math.min(1e9,o))];!function(t,e,n,r,i,a){var o,s=t[0],c=t[1],u=0,l=1,h=e[0]-s,f=e[1]-c;if(o=n-s,h||!(o>0)){if(o/=h,h<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=i-s,h||!(o<0)){if(o/=h,h<0){if(o>l)return;o>u&&(u=o)}else 
if(h>0){if(o0)){if(o/=f,f<0){if(o0){if(o>l)return;o>u&&(u=o)}if(o=a-c,f||!(o<0)){if(o/=f,f<0){if(o>l)return;o>u&&(u=o)}else if(f>0){if(o0&&(t[0]=s+u*h,t[1]=c+u*f),l<1&&(e[0]=s+l*h,e[1]=c+l*f),!0}}}}}(c,x,t,e,n,r)?s&&(b.lineStart(),b.point(a,o),m=!1):(y||(b.lineStart(),b.point(c[0],c[1])),b.point(x[0],x[1]),s||b.lineEnd(),m=!1)}p=a,g=o,y=s}return _}}var Sl,Al,Ml,Ol=function(){var t,e,n,r=0,i=0,a=960,o=500;return n={stream:function(n){return t&&e===n?t:t=Cl(r,i,a,o)(e=n)},extent:function(s){return arguments.length?(r=+s[0][0],i=+s[0][1],a=+s[1][0],o=+s[1][1],t=e=null,n):[[r,i],[a,o]]}}},Dl=sc(),Nl={sphere:Nc,point:Nc,lineStart:function(){Nl.point=Ll,Nl.lineEnd=Bl},lineEnd:Nc,polygonStart:Nc,polygonEnd:Nc};function Bl(){Nl.point=Nl.lineEnd=Nc}function Ll(t,e){Sl=t*=yc,Al=Tc(e*=yc),Ml=xc(e),Nl.point=Fl}function Fl(t,e){t*=yc;var n=Tc(e*=yc),r=xc(e),i=vc(t-Sl),a=xc(i),o=r*Tc(i),s=Ml*n-Al*r*a,c=Al*n+Ml*r*a;Dl.add(bc(Sc(o*o+s*s),c)),Sl=t,Al=n,Ml=r}var Pl=function(t){return Dl.reset(),$c(t,Nl),+Dl},Il=[null,null],jl={type:"LineString",coordinates:Il},Rl=function(t,e){return Il[0]=t,Il[1]=e,Pl(jl)},Yl={Feature:function(t,e){return Ul(t.geometry,e)},FeatureCollection:function(t,e){for(var n=t.features,r=-1,i=n.length;++r0&&(i=Rl(t[a],t[a-1]))>0&&n<=i&&r<=i&&(n+r-i)*(1-Math.pow((n-r)/i,2))<1e-12*i)return!0;n=r}return!1}function Vl(t,e){return!!xl(t.map(Hl),Gl(e))}function Hl(t){return(t=t.map(Gl)).pop(),t}function Gl(t){return[t[0]*yc,t[1]*yc]}var ql=function(t,e){return(t&&Yl.hasOwnProperty(t.type)?Yl[t.type]:Ul)(t,e)};function Xl(t,e,n){var r=k(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[t,e]}))}}function Zl(t,e,n){var r=k(t,e-1e-6,n).concat(e);return function(t){return r.map((function(e){return[e,t]}))}}function Jl(){var t,e,n,r,i,a,o,s,c,u,l,h,f=10,d=f,p=90,g=360,y=2.5;function v(){return{type:"MultiLineString",coordinates:m()}}function m(){return 
k(_c(r/p)*p,n,p).map(l).concat(k(_c(s/g)*g,o,g).map(h)).concat(k(_c(e/f)*f,t,f).filter((function(t){return vc(t%p)>1e-6})).map(c)).concat(k(_c(a/d)*d,i,d).filter((function(t){return vc(t%g)>1e-6})).map(u))}return v.lines=function(){return m().map((function(t){return{type:"LineString",coordinates:t}}))},v.outline=function(){return{type:"Polygon",coordinates:[l(r).concat(h(o).slice(1),l(n).reverse().slice(1),h(s).reverse().slice(1))]}},v.extent=function(t){return arguments.length?v.extentMajor(t).extentMinor(t):v.extentMinor()},v.extentMajor=function(t){return arguments.length?(r=+t[0][0],n=+t[1][0],s=+t[0][1],o=+t[1][1],r>n&&(t=r,r=n,n=t),s>o&&(t=s,s=o,o=t),v.precision(y)):[[r,s],[n,o]]},v.extentMinor=function(n){return arguments.length?(e=+n[0][0],t=+n[1][0],a=+n[0][1],i=+n[1][1],e>t&&(n=e,e=t,t=n),a>i&&(n=a,a=i,i=n),v.precision(y)):[[e,a],[t,i]]},v.step=function(t){return arguments.length?v.stepMajor(t).stepMinor(t):v.stepMinor()},v.stepMajor=function(t){return arguments.length?(p=+t[0],g=+t[1],v):[p,g]},v.stepMinor=function(t){return arguments.length?(f=+t[0],d=+t[1],v):[f,d]},v.precision=function(f){return arguments.length?(y=+f,c=Xl(a,i,90),u=Zl(e,t,y),l=Xl(s,o,90),h=Zl(r,n,y),v):y},v.extentMajor([[-180,1e-6-90],[180,90-1e-6]]).extentMinor([[-180,-80-1e-6],[180,80+1e-6]])}function Ql(){return Jl()()}var Kl,th,eh,nh,rh=function(t,e){var n=t[0]*yc,r=t[1]*yc,i=e[0]*yc,a=e[1]*yc,o=xc(r),s=Tc(r),c=xc(a),u=Tc(a),l=o*xc(n),h=o*Tc(n),f=c*xc(i),d=c*Tc(i),p=2*Oc(Sc(Dc(a-r)+o*c*Dc(i-n))),g=Tc(p),y=p?function(t){var e=Tc(t*=p)/g,n=Tc(p-t)/g,r=n*l+e*f,i=n*h+e*d,a=n*s+e*u;return[bc(i,r)*gc,bc(a,Sc(r*r+i*i))*gc]}:function(){return[n*gc,r*gc]};return y.distance=p,y},ih=function(t){return t},ah=sc(),oh=sc(),sh={point:Nc,lineStart:Nc,lineEnd:Nc,polygonStart:function(){sh.lineStart=ch,sh.lineEnd=hh},polygonEnd:function(){sh.lineStart=sh.lineEnd=sh.point=Nc,ah.add(vc(oh)),oh.reset()},result:function(){var t=ah/2;return ah.reset(),t}};function ch(){sh.point=uh}function 
uh(t,e){sh.point=lh,Kl=eh=t,th=nh=e}function lh(t,e){oh.add(nh*t-eh*e),eh=t,nh=e}function hh(){lh(Kl,th)}var fh=sh,dh=1/0,ph=dh,gh=-dh,yh=gh;var vh,mh,bh,xh,_h={point:function(t,e){tgh&&(gh=t);eyh&&(yh=e)},lineStart:Nc,lineEnd:Nc,polygonStart:Nc,polygonEnd:Nc,result:function(){var t=[[dh,ph],[gh,yh]];return gh=yh=-(ph=dh=1/0),t}},kh=0,wh=0,Eh=0,Th=0,Ch=0,Sh=0,Ah=0,Mh=0,Oh=0,Dh={point:Nh,lineStart:Bh,lineEnd:Ph,polygonStart:function(){Dh.lineStart=Ih,Dh.lineEnd=jh},polygonEnd:function(){Dh.point=Nh,Dh.lineStart=Bh,Dh.lineEnd=Ph},result:function(){var t=Oh?[Ah/Oh,Mh/Oh]:Sh?[Th/Sh,Ch/Sh]:Eh?[kh/Eh,wh/Eh]:[NaN,NaN];return kh=wh=Eh=Th=Ch=Sh=Ah=Mh=Oh=0,t}};function Nh(t,e){kh+=t,wh+=e,++Eh}function Bh(){Dh.point=Lh}function Lh(t,e){Dh.point=Fh,Nh(bh=t,xh=e)}function Fh(t,e){var n=t-bh,r=e-xh,i=Sc(n*n+r*r);Th+=i*(bh+t)/2,Ch+=i*(xh+e)/2,Sh+=i,Nh(bh=t,xh=e)}function Ph(){Dh.point=Nh}function Ih(){Dh.point=Rh}function jh(){Yh(vh,mh)}function Rh(t,e){Dh.point=Yh,Nh(vh=bh=t,mh=xh=e)}function Yh(t,e){var n=t-bh,r=e-xh,i=Sc(n*n+r*r);Th+=i*(bh+t)/2,Ch+=i*(xh+e)/2,Sh+=i,Ah+=(i=xh*t-bh*e)*(bh+t),Mh+=i*(xh+e),Oh+=3*i,Nh(bh=t,xh=e)}var zh=Dh;function Uh(t){this._context=t}Uh.prototype={_radius:4.5,pointRadius:function(t){return this._radius=t,this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._context.closePath(),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._context.moveTo(t,e),this._point=1;break;case 1:this._context.lineTo(t,e);break;default:this._context.moveTo(t+this._radius,e),this._context.arc(t,e,this._radius,0,pc)}},result:Nc};var $h,Wh,Vh,Hh,Gh,qh=sc(),Xh={point:Nc,lineStart:function(){Xh.point=Zh},lineEnd:function(){$h&&Jh(Wh,Vh),Xh.point=Nc},polygonStart:function(){$h=!0},polygonEnd:function(){$h=null},result:function(){var t=+qh;return qh.reset(),t}};function Zh(t,e){Xh.point=Jh,Wh=Hh=t,Vh=Gh=e}function 
Jh(t,e){Hh-=t,Gh-=e,qh.add(Sc(Hh*Hh+Gh*Gh)),Hh=t,Gh=e}var Qh=Xh;function Kh(){this._string=[]}function tf(t){return"m0,"+t+"a"+t+","+t+" 0 1,1 0,"+-2*t+"a"+t+","+t+" 0 1,1 0,"+2*t+"z"}Kh.prototype={_radius:4.5,_circle:tf(4.5),pointRadius:function(t){return(t=+t)!==this._radius&&(this._radius=t,this._circle=null),this},polygonStart:function(){this._line=0},polygonEnd:function(){this._line=NaN},lineStart:function(){this._point=0},lineEnd:function(){0===this._line&&this._string.push("Z"),this._point=NaN},point:function(t,e){switch(this._point){case 0:this._string.push("M",t,",",e),this._point=1;break;case 1:this._string.push("L",t,",",e);break;default:null==this._circle&&(this._circle=tf(this._radius)),this._string.push("M",t,",",e,this._circle)}},result:function(){if(this._string.length){var t=this._string.join("");return this._string=[],t}return null}};var ef=function(t,e){var n,r,i=4.5;function a(t){return t&&("function"==typeof i&&r.pointRadius(+i.apply(this,arguments)),$c(t,n(r))),r.result()}return a.area=function(t){return $c(t,n(fh)),fh.result()},a.measure=function(t){return $c(t,n(Qh)),Qh.result()},a.bounds=function(t){return $c(t,n(_h)),_h.result()},a.centroid=function(t){return $c(t,n(zh)),zh.result()},a.projection=function(e){return arguments.length?(n=null==e?(t=null,ih):(t=e).stream,a):t},a.context=function(t){return arguments.length?(r=null==t?(e=null,new Kh):new Uh(e=t),"function"!=typeof i&&r.pointRadius(i),a):e},a.pointRadius=function(t){return arguments.length?(i="function"==typeof t?t:(r.pointRadius(+t),+t),a):i},a.projection(t).context(e)},nf=function(t){return{stream:rf(t)}};function rf(t){return function(e){var n=new af;for(var r in t)n[r]=t[r];return n.stream=e,n}}function af(){}function of(t,e,n){var r=t.clipExtent&&t.clipExtent();return t.scale(150).translate([0,0]),null!=r&&t.clipExtent(null),$c(n,t.stream(_h)),e(_h.result()),null!=r&&t.clipExtent(r),t}function sf(t,e,n){return of(t,(function(n){var 
r=e[1][0]-e[0][0],i=e[1][1]-e[0][1],a=Math.min(r/(n[1][0]-n[0][0]),i/(n[1][1]-n[0][1])),o=+e[0][0]+(r-a*(n[1][0]+n[0][0]))/2,s=+e[0][1]+(i-a*(n[1][1]+n[0][1]))/2;t.scale(150*a).translate([o,s])}),n)}function cf(t,e,n){return sf(t,[[0,0],e],n)}function uf(t,e,n){return of(t,(function(n){var r=+e,i=r/(n[1][0]-n[0][0]),a=(r-i*(n[1][0]+n[0][0]))/2,o=-i*n[0][1];t.scale(150*i).translate([a,o])}),n)}function lf(t,e,n){return of(t,(function(n){var r=+e,i=r/(n[1][1]-n[0][1]),a=-i*n[0][0],o=(r-i*(n[1][1]+n[0][1]))/2;t.scale(150*i).translate([a,o])}),n)}af.prototype={constructor:af,point:function(t,e){this.stream.point(t,e)},sphere:function(){this.stream.sphere()},lineStart:function(){this.stream.lineStart()},lineEnd:function(){this.stream.lineEnd()},polygonStart:function(){this.stream.polygonStart()},polygonEnd:function(){this.stream.polygonEnd()}};var hf=xc(30*yc),ff=function(t,e){return+e?function(t,e){function n(r,i,a,o,s,c,u,l,h,f,d,p,g,y){var v=u-r,m=l-i,b=v*v+m*m;if(b>4*e&&g--){var x=o+f,_=s+d,k=c+p,w=Sc(x*x+_*_+k*k),E=Oc(k/=w),T=vc(vc(k)-1)<1e-6||vc(a-h)<1e-6?(a+h)/2:bc(_,x),C=t(T,E),S=C[0],A=C[1],M=S-r,O=A-i,D=m*M-v*O;(D*D/b>e||vc((v*M+m*O)/b-.5)>.3||o*f+s*d+c*p2?t[2]%360*yc:0,S()):[y*gc,v*gc,m*gc]},T.angle=function(t){return arguments.length?(b=t%360*yc,S()):b*gc},T.precision=function(t){return arguments.length?(o=ff(s,E=t*t),A()):Sc(E)},T.fitExtent=function(t,e){return sf(T,t,e)},T.fitSize=function(t,e){return cf(T,t,e)},T.fitWidth=function(t,e){return uf(T,t,e)},T.fitHeight=function(t,e){return lf(T,t,e)},function(){return e=t.apply(this,arguments),T.invert=e.invert&&C,S()}}function mf(t){var e=0,n=hc/3,r=vf(t),i=r(e,n);return i.parallels=function(t){return arguments.length?r(e=t[0]*yc,n=t[1]*yc):[e*gc,n*gc]},i}function bf(t,e){var n=Tc(t),r=(n+Tc(e))/2;if(vc(r)<1e-6)return function(t){var e=xc(t);function n(t,n){return[t*e,Tc(n)/e]}return n.invert=function(t,n){return[t/e,Oc(n*e)]},n}(t);var i=1+n*(2*r-n),a=Sc(i)/r;function o(t,e){var 
n=Sc(i-2*r*Tc(e))/r;return[n*Tc(t*=r),a-n*xc(t)]}return o.invert=function(t,e){var n=a-e;return[bc(t,vc(n))/r*Cc(n),Oc((i-(t*t+n*n)*r*r)/(2*r))]},o}var xf=function(){return mf(bf).scale(155.424).center([0,33.6442])},_f=function(){return xf().parallels([29.5,45.5]).scale(1070).translate([480,250]).rotate([96,0]).center([-.6,38.7])};var kf=function(){var t,e,n,r,i,a,o=_f(),s=xf().rotate([154,0]).center([-2,58.5]).parallels([55,65]),c=xf().rotate([157,0]).center([-3,19.9]).parallels([8,18]),u={point:function(t,e){a=[t,e]}};function l(t){var e=t[0],o=t[1];return a=null,n.point(e,o),a||(r.point(e,o),a)||(i.point(e,o),a)}function h(){return t=e=null,l}return l.invert=function(t){var e=o.scale(),n=o.translate(),r=(t[0]-n[0])/e,i=(t[1]-n[1])/e;return(i>=.12&&i<.234&&r>=-.425&&r<-.214?s:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:o).invert(t)},l.stream=function(n){return t&&e===n?t:(r=[o.stream(e=n),s.stream(n),c.stream(n)],i=r.length,t={point:function(t,e){for(var n=-1;++n0?e<1e-6-fc&&(e=1e-6-fc):e>fc-1e-6&&(e=fc-1e-6);var n=i/Ec(Nf(e),r);return[n*Tc(r*t),i-n*xc(r*t)]}return a.invert=function(t,e){var n=i-e,a=Cc(r)*Sc(t*t+n*n);return[bc(t,vc(n))/r*Cc(n),2*mc(Ec(i/a,1/r))-fc]},a}var Lf=function(){return mf(Bf).scale(109.5).parallels([30,30])};function Ff(t,e){return[t,e]}Ff.invert=Ff;var Pf=function(){return yf(Ff).scale(152.63)};function If(t,e){var n=xc(t),r=t===e?Tc(t):(n-xc(e))/(e-t),i=n/r+t;if(vc(r)<1e-6)return Ff;function a(t,e){var n=i-e,a=r*t;return[n*Tc(a),i-n*xc(a)]}return a.invert=function(t,e){var n=i-e;return[bc(t,vc(n))/r*Cc(n),i-Cc(r)*Sc(t*t+n*n)]},a}var jf=function(){return mf(If).scale(131.154).center([0,13.9389])},Rf=1.340264,Yf=-.081106,zf=893e-6,Uf=.003796,$f=Sc(3)/2;function Wf(t,e){var n=Oc($f*Tc(e)),r=n*n,i=r*r*r;return[t*xc(n)/($f*(Rf+3*Yf*r+i*(7*zf+9*Uf*r))),n*(Rf+Yf*r+i*(zf+Uf*r))]}Wf.invert=function(t,e){for(var 
n,r=e,i=r*r,a=i*i*i,o=0;o<12&&(a=(i=(r-=n=(r*(Rf+Yf*i+a*(zf+Uf*i))-e)/(Rf+3*Yf*i+a*(7*zf+9*Uf*i)))*r)*i*i,!(vc(n)<1e-12));++o);return[$f*t*(Rf+3*Yf*i+a*(7*zf+9*Uf*i))/xc(r),Oc(Tc(r)/$f)]};var Vf=function(){return yf(Wf).scale(177.158)};function Hf(t,e){var n=xc(e),r=xc(t)*n;return[n*Tc(t)/r,Tc(e)/r]}Hf.invert=Ef(mc);var Gf=function(){return yf(Hf).scale(144.049).clipAngle(60)};function qf(t,e,n,r){return 1===t&&1===e&&0===n&&0===r?ih:rf({point:function(i,a){this.stream.point(i*t+n,a*e+r)}})}var Xf=function(){var t,e,n,r,i,a,o=1,s=0,c=0,u=1,l=1,h=ih,f=null,d=ih;function p(){return r=i=null,a}return a={stream:function(t){return r&&i===t?r:r=h(d(i=t))},postclip:function(r){return arguments.length?(d=r,f=t=e=n=null,p()):d},clipExtent:function(r){return arguments.length?(d=null==r?(f=t=e=n=null,ih):Cl(f=+r[0][0],t=+r[0][1],e=+r[1][0],n=+r[1][1]),p()):null==f?null:[[f,t],[e,n]]},scale:function(t){return arguments.length?(h=qf((o=+t)*u,o*l,s,c),p()):o},translate:function(t){return arguments.length?(h=qf(o*u,o*l,s=+t[0],c=+t[1]),p()):[s,c]},reflectX:function(t){return arguments.length?(h=qf(o*(u=t?-1:1),o*l,s,c),p()):u<0},reflectY:function(t){return arguments.length?(h=qf(o*u,o*(l=t?-1:1),s,c),p()):l<0},fitExtent:function(t,e){return sf(a,t,e)},fitSize:function(t,e){return cf(a,t,e)},fitWidth:function(t,e){return uf(a,t,e)},fitHeight:function(t,e){return lf(a,t,e)}}};function Zf(t,e){var n=e*e,r=n*n;return[t*(.8707-.131979*n+r*(r*(.003971*n-.001529*r)-.013791)),e*(1.007226+n*(.015085+r*(.028874*n-.044475-.005916*r)))]}Zf.invert=function(t,e){var n,r=e,i=25;do{var a=r*r,o=a*a;r-=n=(r*(1.007226+a*(.015085+o*(.028874*a-.044475-.005916*o)))-e)/(1.007226+a*(.045255+o*(.259866*a-.311325-.005916*11*o)))}while(vc(n)>1e-6&&--i>0);return[t/(.8707+(a=r*r)*(a*(a*a*a*(.003971-.001529*a)-.013791)-.131979)),r]};var Jf=function(){return yf(Zf).scale(175.295)};function Qf(t,e){return[xc(e)*Tc(t),Tc(e)]}Qf.invert=Ef(Oc);var Kf=function(){return 
yf(Qf).scale(249.5).clipAngle(90+1e-6)};function td(t,e){var n=xc(e),r=1+xc(t)*n;return[n*Tc(t)/r,Tc(e)/r]}td.invert=Ef((function(t){return 2*mc(t)}));var ed=function(){return yf(td).scale(250).clipAngle(142)};function nd(t,e){return[wc(Ac((fc+e)/2)),-t]}nd.invert=function(t,e){return[-e,2*mc(kc(t))-fc]};var rd=function(){var t=Df(nd),e=t.center,n=t.rotate;return t.center=function(t){return arguments.length?e([-t[1],t[0]]):[(t=e())[1],-t[0]]},t.rotate=function(t){return arguments.length?n([t[0],t[1],t.length>2?t[2]+90:90]):[(t=n())[0],t[1],t[2]-90]},n([0,0,90]).scale(159.155)};function id(t,e){return t.parent===e.parent?1:2}function ad(t,e){return t+e.x}function od(t,e){return Math.max(t,e.y)}var sd=function(){var t=id,e=1,n=1,r=!1;function i(i){var a,o=0;i.eachAfter((function(e){var n=e.children;n?(e.x=function(t){return t.reduce(ad,0)/t.length}(n),e.y=function(t){return 1+t.reduce(od,0)}(n)):(e.x=a?o+=t(e,a):0,e.y=0,a=e)}));var s=function(t){for(var e;e=t.children;)t=e[0];return t}(i),c=function(t){for(var e;e=t.children;)t=e[e.length-1];return t}(i),u=s.x-t(s,c)/2,l=c.x+t(c,s)/2;return i.eachAfter(r?function(t){t.x=(t.x-i.x)*e,t.y=(i.y-t.y)*n}:function(t){t.x=(t.x-u)/(l-u)*e,t.y=(1-(i.y?t.y/i.y:1))*n})}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i};function cd(t){var e=0,n=t.children,r=n&&n.length;if(r)for(;--r>=0;)e+=n[r].value;else e=1;t.value=e}function ud(t,e){var n,r,i,a,o,s=new dd(t),c=+t.value&&(s.value=t.value),u=[s];for(null==e&&(e=ld);n=u.pop();)if(c&&(n.value=+n.data.value),(i=e(n.data))&&(o=i.length))for(n.children=new Array(o),a=o-1;a>=0;--a)u.push(r=n.children[a]=new dd(i[a])),r.parent=n,r.depth=n.depth+1;return s.eachBefore(fd)}function ld(t){return t.children}function hd(t){t.data=t.data.data}function fd(t){var 
e=0;do{t.height=e}while((t=t.parent)&&t.height<++e)}function dd(t){this.data=t,this.depth=this.height=0,this.parent=null}dd.prototype=ud.prototype={constructor:dd,count:function(){return this.eachAfter(cd)},each:function(t){var e,n,r,i,a=this,o=[a];do{for(e=o.reverse(),o=[];a=e.pop();)if(t(a),n=a.children)for(r=0,i=n.length;r=0;--n)i.push(e[n]);return this},sum:function(t){return this.eachAfter((function(e){for(var n=+t(e.data)||0,r=e.children,i=r&&r.length;--i>=0;)n+=r[i].value;e.value=n}))},sort:function(t){return this.eachBefore((function(e){e.children&&e.children.sort(t)}))},path:function(t){for(var e=this,n=function(t,e){if(t===e)return t;var n=t.ancestors(),r=e.ancestors(),i=null;t=n.pop(),e=r.pop();for(;t===e;)i=t,t=n.pop(),e=r.pop();return i}(e,t),r=[e];e!==n;)e=e.parent,r.push(e);for(var i=r.length;t!==n;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,e=[t];t=t.parent;)e.push(t);return e},descendants:function(){var t=[];return this.each((function(e){t.push(e)})),t},leaves:function(){var t=[];return this.eachBefore((function(e){e.children||t.push(e)})),t},links:function(){var t=this,e=[];return t.each((function(n){n!==t&&e.push({source:n.parent,target:n})})),e},copy:function(){return ud(this).eachBefore(hd)}};var pd=Array.prototype.slice;var gd=function(t){for(var e,n,r=0,i=(t=function(t){for(var e,n,r=t.length;r;)n=Math.random()*r--|0,e=t[r],t[r]=t[n],t[n]=e;return t}(pd.call(t))).length,a=[];r0&&n*n>r*r+i*i}function bd(t,e){for(var n=0;n(o*=o)?(r=(u+o-i)/(2*u),a=Math.sqrt(Math.max(0,o/u-r*r)),n.x=t.x-r*s-a*c,n.y=t.y-r*c+a*s):(r=(u+i-o)/(2*u),a=Math.sqrt(Math.max(0,i/u-r*r)),n.x=e.x+r*s-a*c,n.y=e.y+r*c+a*s)):(n.x=e.x+n.r,n.y=e.y)}function Ed(t,e){var n=t.r+e.r-1e-6,r=e.x-t.x,i=e.y-t.y;return n>0&&n*n>r*r+i*i}function Td(t){var e=t._,n=t.next._,r=e.r+n.r,i=(e.x*n.r+n.x*e.r)/r,a=(e.y*n.r+n.y*e.r)/r;return i*i+a*a}function Cd(t){this._=t,this.next=null,this.previous=null}function Sd(t){if(!(i=t.length))return 0;var 
e,n,r,i,a,o,s,c,u,l,h;if((e=t[0]).x=0,e.y=0,!(i>1))return e.r;if(n=t[1],e.x=-n.r,n.x=e.r,n.y=0,!(i>2))return e.r+n.r;wd(n,e,r=t[2]),e=new Cd(e),n=new Cd(n),r=new Cd(r),e.next=r.previous=n,n.next=e.previous=r,r.next=n.previous=e;t:for(s=3;s0)throw new Error("cycle");return a}return n.id=function(e){return arguments.length?(t=Od(e),n):t},n.parentId=function(t){return arguments.length?(e=Od(t),n):e},n};function Hd(t,e){return t.parent===e.parent?1:2}function Gd(t){var e=t.children;return e?e[0]:t.t}function qd(t){var e=t.children;return e?e[e.length-1]:t.t}function Xd(t,e,n){var r=n/(e.i-t.i);e.c-=r,e.s+=n,t.c+=r,e.z+=n,e.m+=n}function Zd(t,e,n){return t.a.parent===e.parent?t.a:n}function Jd(t,e){this._=t,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=e}Jd.prototype=Object.create(dd.prototype);var Qd=function(){var t=Hd,e=1,n=1,r=null;function i(i){var c=function(t){for(var e,n,r,i,a,o=new Jd(t,0),s=[o];e=s.pop();)if(r=e._.children)for(e.children=new Array(a=r.length),i=a-1;i>=0;--i)s.push(n=e.children[i]=new Jd(r[i],i)),n.parent=e;return(o.parent=new Jd(null,0)).children=[o],o}(i);if(c.eachAfter(a),c.parent.m=-c.z,c.eachBefore(o),r)i.eachBefore(s);else{var u=i,l=i,h=i;i.eachBefore((function(t){t.xl.x&&(l=t),t.depth>h.depth&&(h=t)}));var f=u===l?1:t(u,l)/2,d=f-u.x,p=e/(l.x+f+d),g=n/(h.depth||1);i.eachBefore((function(t){t.x=(t.x+d)*p,t.y=t.depth*g}))}return i}function a(e){var n=e.children,r=e.parent.children,i=e.i?r[e.i-1]:null;if(n){!function(t){for(var e,n=0,r=0,i=t.children,a=i.length;--a>=0;)(e=i[a]).z+=n,e.m+=n,n+=e.s+(r+=e.c)}(e);var a=(n[0].z+n[n.length-1].z)/2;i?(e.z=i.z+t(e._,i._),e.m=e.z-a):e.z=a}else i&&(e.z=i.z+t(e._,i._));e.parent.A=function(e,n,r){if(n){for(var 
i,a=e,o=e,s=n,c=a.parent.children[0],u=a.m,l=o.m,h=s.m,f=c.m;s=qd(s),a=Gd(a),s&&a;)c=Gd(c),(o=qd(o)).a=e,(i=s.z+h-a.z-u+t(s._,a._))>0&&(Xd(Zd(s,e,r),e,i),u+=i,l+=i),h+=s.m,u+=a.m,f+=c.m,l+=o.m;s&&!qd(o)&&(o.t=s,o.m+=h-l),a&&!Gd(c)&&(c.t=a,c.m+=u-f,r=e)}return r}(e,i,e.parent.A||r[0])}function o(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function s(t){t.x*=e,t.y=t.depth*n}return i.separation=function(e){return arguments.length?(t=e,i):t},i.size=function(t){return arguments.length?(r=!1,e=+t[0],n=+t[1],i):r?null:[e,n]},i.nodeSize=function(t){return arguments.length?(r=!0,e=+t[0],n=+t[1],i):r?[e,n]:null},i},Kd=function(t,e,n,r,i){for(var a,o=t.children,s=-1,c=o.length,u=t.value&&(i-n)/t.value;++sf&&(f=s),y=l*l*g,(d=Math.max(f/y,y/h))>p){l-=s;break}p=d}v.push(o={value:l,dice:c1?e:1)},n}(tp),rp=function(){var t=np,e=!1,n=1,r=1,i=[0],a=Dd,o=Dd,s=Dd,c=Dd,u=Dd;function l(t){return t.x0=t.y0=0,t.x1=n,t.y1=r,t.eachBefore(h),i=[0],e&&t.eachBefore(jd),t}function h(e){var n=i[e.depth],r=e.x0+n,l=e.y0+n,h=e.x1-n,f=e.y1-n;h=n-1){var l=s[e];return l.x0=i,l.y0=a,l.x1=o,void(l.y1=c)}var h=u[e],f=r/2+h,d=e+1,p=n-1;for(;d>>1;u[g]c-a){var m=(i*v+o*y)/r;t(e,d,y,i,a,m,c),t(d,n,v,m,a,o,c)}else{var b=(a*v+c*y)/r;t(e,d,y,i,a,o,b),t(d,n,v,i,b,o,c)}}(0,c,t.value,e,n,r,i)},ap=function(t,e,n,r,i){(1&t.depth?Kd:Rd)(t,e,n,r,i)},op=function t(e){function n(t,n,r,i,a){if((o=t._squarify)&&o.ratio===e)for(var o,s,c,u,l,h=-1,f=o.length,d=t.value;++h1?e:1)},n}(tp),sp=function(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}},cp=function(t,e){var n=un(+t,+e);return function(t){var e=n(t);return e-360*Math.floor(e/360)}},up=function(t,e){return t=+t,e=+e,function(n){return Math.round(t*(1-n)+e*n)}},lp=Math.SQRT2;function hp(t){return((t=Math.exp(t))+1/t)/2}var fp=function(t,e){var n,r,i=t[0],a=t[1],o=t[2],s=e[0],c=e[1],u=e[2],l=s-i,h=c-a,f=l*l+h*h;if(f<1e-12)r=Math.log(u/o)/lp,n=function(t){return[i+t*l,a+t*h,o*Math.exp(lp*t*r)]};else{var 
d=Math.sqrt(f),p=(u*u-o*o+4*f)/(2*o*2*d),g=(u*u-o*o-4*f)/(2*u*2*d),y=Math.log(Math.sqrt(p*p+1)-p),v=Math.log(Math.sqrt(g*g+1)-g);r=(v-y)/lp,n=function(t){var e,n=t*r,s=hp(y),c=o/(2*d)*(s*(e=lp*n+y,((e=Math.exp(2*e))-1)/(e+1))-function(t){return((t=Math.exp(t))-1/t)/2}(y));return[i+c*l,a+c*h,o*s/hp(lp*n+y)]}}return n.duration=1e3*r,n};function dp(t){return function(e,n){var r=t((e=tn(e)).h,(n=tn(n)).h),i=hn(e.s,n.s),a=hn(e.l,n.l),o=hn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.s=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var pp=dp(un),gp=dp(hn);function yp(t,e){var n=hn((t=pa(t)).l,(e=pa(e)).l),r=hn(t.a,e.a),i=hn(t.b,e.b),a=hn(t.opacity,e.opacity);return function(e){return t.l=n(e),t.a=r(e),t.b=i(e),t.opacity=a(e),t+""}}function vp(t){return function(e,n){var r=t((e=ka(e)).h,(n=ka(n)).h),i=hn(e.c,n.c),a=hn(e.l,n.l),o=hn(e.opacity,n.opacity);return function(t){return e.h=r(t),e.c=i(t),e.l=a(t),e.opacity=o(t),e+""}}}var mp=vp(un),bp=vp(hn);function xp(t){return function e(n){function r(e,r){var i=t((e=Oa(e)).h,(r=Oa(r)).h),a=hn(e.s,r.s),o=hn(e.l,r.l),s=hn(e.opacity,r.opacity);return function(t){return e.h=i(t),e.s=a(t),e.l=o(Math.pow(t,n)),e.opacity=s(t),e+""}}return n=+n,r.gamma=e,r}(1)}var _p=xp(un),kp=xp(hn);function wp(t,e){for(var n=0,r=e.length-1,i=e[0],a=new Array(r<0?0:r);n1&&(e=t[a[o-2]],n=t[a[o-1]],r=t[s],(n[0]-e[0])*(r[1]-e[1])-(n[1]-e[1])*(r[0]-e[0])<=0);)--o;a[o++]=s}return a.slice(0,o)}var Mp=function(t){if((n=t.length)<3)return null;var e,n,r=new Array(n),i=new Array(n);for(e=0;e=0;--e)u.push(t[r[a[e]][2]]);for(e=+s;es!=u>s&&o<(c-n)*(s-r)/(u-r)+n&&(l=!l),c=n,u=r;return l},Dp=function(t){for(var e,n,r=-1,i=t.length,a=t[i-1],o=a[0],s=a[1],c=0;++r1);return t+n*a*Math.sqrt(-2*Math.log(i)/i)}}return n.source=t,n}(Np),Fp=function t(e){function n(){var t=Lp.source(e).apply(this,arguments);return function(){return Math.exp(t())}}return n.source=t,n}(Np),Pp=function t(e){function n(t){return function(){for(var n=0,r=0;rr&&(e=n,n=r,r=e),function(t){return 
Math.max(n,Math.min(r,t))}}function tg(t,e,n){var r=t[0],i=t[1],a=e[0],o=e[1];return i2?eg:tg,i=a=null,h}function h(e){return isNaN(e=+e)?n:(i||(i=r(o.map(t),s,c)))(t(u(e)))}return h.invert=function(n){return u(e((a||(a=r(s,o.map(t),_n)))(n)))},h.domain=function(t){return arguments.length?(o=Up.call(t,Xp),u===Jp||(u=Kp(o)),l()):o.slice()},h.range=function(t){return arguments.length?(s=$p.call(t),l()):s.slice()},h.rangeRound=function(t){return s=$p.call(t),c=up,l()},h.clamp=function(t){return arguments.length?(u=t?Kp(o):Jp,h):u!==Jp},h.interpolate=function(t){return arguments.length?(c=t,l()):c},h.unknown=function(t){return arguments.length?(n=t,h):n},function(n,r){return t=n,e=r,l()}}function ig(t,e){return rg()(t,e)}var ag=function(t,e,n,r){var i,a=A(t,e,n);switch((r=Vs(null==r?",f":r)).type){case"s":var o=Math.max(Math.abs(t),Math.abs(e));return null!=r.precision||isNaN(i=ac(a,o))||(r.precision=i),Zs(r,o);case"":case"e":case"g":case"p":case"r":null!=r.precision||isNaN(i=oc(a,Math.max(Math.abs(t),Math.abs(e))))||(r.precision=i-("e"===r.type));break;case"f":case"%":null!=r.precision||isNaN(i=ic(a))||(r.precision=i-2*("%"===r.type))}return Xs(r)};function og(t){var e=t.domain;return t.ticks=function(t){var n=e();return C(n[0],n[n.length-1],null==t?10:t)},t.tickFormat=function(t,n){var r=e();return ag(r[0],r[r.length-1],null==t?10:t,n)},t.nice=function(n){null==n&&(n=10);var r,i=e(),a=0,o=i.length-1,s=i[a],c=i[o];return c0?r=S(s=Math.floor(s/r)*r,c=Math.ceil(c/r)*r,n):r<0&&(r=S(s=Math.ceil(s*r)/r,c=Math.floor(c*r)/r,n)),r>0?(i[a]=Math.floor(s/r)*r,i[o]=Math.ceil(c/r)*r,e(i)):r<0&&(i[a]=Math.ceil(s*r)/r,i[o]=Math.floor(c*r)/r,e(i)),t},t}function sg(){var t=ig(Jp,Jp);return t.copy=function(){return ng(t,sg())},Rp.apply(t,arguments),og(t)}function cg(t){var e;function n(t){return isNaN(t=+t)?e:t}return n.invert=n,n.domain=n.range=function(e){return arguments.length?(t=Up.call(e,Xp),n):t.slice()},n.unknown=function(t){return 
arguments.length?(e=t,n):e},n.copy=function(){return cg(t).unknown(e)},t=arguments.length?Up.call(t,Xp):[0,1],og(n)}var ug=function(t,e){var n,r=0,i=(t=t.slice()).length-1,a=t[r],o=t[i];return o0){for(;fc)break;g.push(h)}}else for(;f=1;--l)if(!((h=u*l)c)break;g.push(h)}}else g=C(f,d,Math.min(d-f,p)).map(n);return r?g.reverse():g},r.tickFormat=function(t,i){if(null==i&&(i=10===a?".0e":","),"function"!=typeof i&&(i=Xs(i)),t===1/0)return i;null==t&&(t=10);var o=Math.max(1,a*t/r.ticks().length);return function(t){var r=t/n(Math.round(e(t)));return r*a0?i[r-1]:e[0],r=r?[i[r-1],n]:[i[o-1],i[o]]},o.unknown=function(e){return arguments.length?(t=e,o):o},o.thresholds=function(){return i.slice()},o.copy=function(){return Mg().domain([e,n]).range(a).unknown(t)},Rp.apply(og(o),arguments)}function Og(){var t,e=[.5],n=[0,1],r=1;function i(i){return i<=i?n[c(e,i,0,r)]:t}return i.domain=function(t){return arguments.length?(e=$p.call(t),r=Math.min(e.length,n.length-1),i):e.slice()},i.range=function(t){return arguments.length?(n=$p.call(t),r=Math.min(e.length,n.length-1),i):n.slice()},i.invertExtent=function(t){var r=n.indexOf(t);return[e[r-1],e[r]]},i.unknown=function(e){return arguments.length?(t=e,i):t},i.copy=function(){return Og().domain(e).range(n).unknown(t)},Rp.apply(i,arguments)}var Dg=new Date,Ng=new Date;function Bg(t,e,n,r){function i(e){return t(e=0===arguments.length?new Date:new Date(+e)),e}return i.floor=function(e){return t(e=new Date(+e)),e},i.ceil=function(n){return t(n=new Date(n-1)),e(n,1),t(n),n},i.round=function(t){var e=i(t),n=i.ceil(t);return t-e0))return s;do{s.push(o=new Date(+n)),e(n,a),t(n)}while(o=e)for(;t(e),!n(e);)e.setTime(e-1)}),(function(t,r){if(t>=t)if(r<0)for(;++r<=0;)for(;e(t,-1),!n(t););else for(;--r>=0;)for(;e(t,1),!n(t););}))},n&&(i.count=function(e,r){return Dg.setTime(+e),Ng.setTime(+r),t(Dg),t(Ng),Math.floor(n(Dg,Ng))},i.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?i.filter(r?function(e){return 
r(e)%t==0}:function(e){return i.count(0,e)%t==0}):i:null}),i}var Lg=Bg((function(t){t.setMonth(0,1),t.setHours(0,0,0,0)}),(function(t,e){t.setFullYear(t.getFullYear()+e)}),(function(t,e){return e.getFullYear()-t.getFullYear()}),(function(t){return t.getFullYear()}));Lg.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Bg((function(e){e.setFullYear(Math.floor(e.getFullYear()/t)*t),e.setMonth(0,1),e.setHours(0,0,0,0)}),(function(e,n){e.setFullYear(e.getFullYear()+n*t)})):null};var Fg=Lg,Pg=Lg.range,Ig=Bg((function(t){t.setDate(1),t.setHours(0,0,0,0)}),(function(t,e){t.setMonth(t.getMonth()+e)}),(function(t,e){return e.getMonth()-t.getMonth()+12*(e.getFullYear()-t.getFullYear())}),(function(t){return t.getMonth()})),jg=Ig,Rg=Ig.range;function Yg(t){return Bg((function(e){e.setDate(e.getDate()-(e.getDay()+7-t)%7),e.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+7*e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/6048e5}))}var zg=Yg(0),Ug=Yg(1),$g=Yg(2),Wg=Yg(3),Vg=Yg(4),Hg=Yg(5),Gg=Yg(6),qg=zg.range,Xg=Ug.range,Zg=$g.range,Jg=Wg.range,Qg=Vg.range,Kg=Hg.range,ty=Gg.range,ey=Bg((function(t){t.setHours(0,0,0,0)}),(function(t,e){t.setDate(t.getDate()+e)}),(function(t,e){return(e-t-6e4*(e.getTimezoneOffset()-t.getTimezoneOffset()))/864e5}),(function(t){return t.getDate()-1})),ny=ey,ry=ey.range,iy=Bg((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds()-6e4*t.getMinutes())}),(function(t,e){t.setTime(+t+36e5*e)}),(function(t,e){return(e-t)/36e5}),(function(t){return t.getHours()})),ay=iy,oy=iy.range,sy=Bg((function(t){t.setTime(t-t.getMilliseconds()-1e3*t.getSeconds())}),(function(t,e){t.setTime(+t+6e4*e)}),(function(t,e){return(e-t)/6e4}),(function(t){return t.getMinutes()})),cy=sy,uy=sy.range,ly=Bg((function(t){t.setTime(t-t.getMilliseconds())}),(function(t,e){t.setTime(+t+1e3*e)}),(function(t,e){return(e-t)/1e3}),(function(t){return 
t.getUTCSeconds()})),hy=ly,fy=ly.range,dy=Bg((function(){}),(function(t,e){t.setTime(+t+e)}),(function(t,e){return e-t}));dy.every=function(t){return t=Math.floor(t),isFinite(t)&&t>0?t>1?Bg((function(e){e.setTime(Math.floor(e/t)*t)}),(function(e,n){e.setTime(+e+n*t)}),(function(e,n){return(n-e)/t})):dy:null};var py=dy,gy=dy.range;function yy(t){return Bg((function(e){e.setUTCDate(e.getUTCDate()-(e.getUTCDay()+7-t)%7),e.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+7*e)}),(function(t,e){return(e-t)/6048e5}))}var vy=yy(0),my=yy(1),by=yy(2),xy=yy(3),_y=yy(4),ky=yy(5),wy=yy(6),Ey=vy.range,Ty=my.range,Cy=by.range,Sy=xy.range,Ay=_y.range,My=ky.range,Oy=wy.range,Dy=Bg((function(t){t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCDate(t.getUTCDate()+e)}),(function(t,e){return(e-t)/864e5}),(function(t){return t.getUTCDate()-1})),Ny=Dy,By=Dy.range,Ly=Bg((function(t){t.setUTCMonth(0,1),t.setUTCHours(0,0,0,0)}),(function(t,e){t.setUTCFullYear(t.getUTCFullYear()+e)}),(function(t,e){return e.getUTCFullYear()-t.getUTCFullYear()}),(function(t){return t.getUTCFullYear()}));Ly.every=function(t){return isFinite(t=Math.floor(t))&&t>0?Bg((function(e){e.setUTCFullYear(Math.floor(e.getUTCFullYear()/t)*t),e.setUTCMonth(0,1),e.setUTCHours(0,0,0,0)}),(function(e,n){e.setUTCFullYear(e.getUTCFullYear()+n*t)})):null};var Fy=Ly,Py=Ly.range;function Iy(t){if(0<=t.y&&t.y<100){var e=new Date(-1,t.m,t.d,t.H,t.M,t.S,t.L);return e.setFullYear(t.y),e}return new Date(t.y,t.m,t.d,t.H,t.M,t.S,t.L)}function jy(t){if(0<=t.y&&t.y<100){var e=new Date(Date.UTC(-1,t.m,t.d,t.H,t.M,t.S,t.L));return e.setUTCFullYear(t.y),e}return new Date(Date.UTC(t.y,t.m,t.d,t.H,t.M,t.S,t.L))}function Ry(t,e,n){return{y:t,m:e,d:n,H:0,M:0,S:0,L:0}}function Yy(t){var e=t.dateTime,n=t.date,r=t.time,i=t.periods,a=t.days,o=t.shortDays,s=t.months,c=t.shortMonths,u=Qy(i),l=Ky(i),h=Qy(a),f=Ky(a),d=Qy(o),p=Ky(o),g=Qy(s),y=Ky(s),v=Qy(c),m=Ky(c),b={a:function(t){return o[t.getDay()]},A:function(t){return 
a[t.getDay()]},b:function(t){return c[t.getMonth()]},B:function(t){return s[t.getMonth()]},c:null,d:xv,e:xv,f:Tv,H:_v,I:kv,j:wv,L:Ev,m:Cv,M:Sv,p:function(t){return i[+(t.getHours()>=12)]},q:function(t){return 1+~~(t.getMonth()/3)},Q:em,s:nm,S:Av,u:Mv,U:Ov,V:Dv,w:Nv,W:Bv,x:null,X:null,y:Lv,Y:Fv,Z:Pv,"%":tm},x={a:function(t){return o[t.getUTCDay()]},A:function(t){return a[t.getUTCDay()]},b:function(t){return c[t.getUTCMonth()]},B:function(t){return s[t.getUTCMonth()]},c:null,d:Iv,e:Iv,f:Uv,H:jv,I:Rv,j:Yv,L:zv,m:$v,M:Wv,p:function(t){return i[+(t.getUTCHours()>=12)]},q:function(t){return 1+~~(t.getUTCMonth()/3)},Q:em,s:nm,S:Vv,u:Hv,U:Gv,V:qv,w:Xv,W:Zv,x:null,X:null,y:Jv,Y:Qv,Z:Kv,"%":tm},_={a:function(t,e,n){var r=d.exec(e.slice(n));return r?(t.w=p[r[0].toLowerCase()],n+r[0].length):-1},A:function(t,e,n){var r=h.exec(e.slice(n));return r?(t.w=f[r[0].toLowerCase()],n+r[0].length):-1},b:function(t,e,n){var r=v.exec(e.slice(n));return r?(t.m=m[r[0].toLowerCase()],n+r[0].length):-1},B:function(t,e,n){var r=g.exec(e.slice(n));return r?(t.m=y[r[0].toLowerCase()],n+r[0].length):-1},c:function(t,n,r){return E(t,e,n,r)},d:lv,e:lv,f:yv,H:fv,I:fv,j:hv,L:gv,m:uv,M:dv,p:function(t,e,n){var r=u.exec(e.slice(n));return r?(t.p=l[r[0].toLowerCase()],n+r[0].length):-1},q:cv,Q:mv,s:bv,S:pv,u:ev,U:nv,V:rv,w:tv,W:iv,x:function(t,e,r){return E(t,n,e,r)},X:function(t,e,n){return E(t,r,e,n)},y:ov,Y:av,Z:sv,"%":vv};function k(t,e){return function(n){var r,i,a,o=[],s=-1,c=0,u=t.length;for(n instanceof Date||(n=new Date(+n));++s53)return null;"w"in a||(a.w=1),"Z"in a?(i=(r=jy(Ry(a.y,0,1))).getUTCDay(),r=i>4||0===i?my.ceil(r):my(r),r=Ny.offset(r,7*(a.V-1)),a.y=r.getUTCFullYear(),a.m=r.getUTCMonth(),a.d=r.getUTCDate()+(a.w+6)%7):(i=(r=Iy(Ry(a.y,0,1))).getDay(),r=i>4||0===i?Ug.ceil(r):Ug(r),r=ny.offset(r,7*(a.V-1)),a.y=r.getFullYear(),a.m=r.getMonth(),a.d=r.getDate()+(a.w+6)%7)}else("W"in a||"U"in a)&&("w"in a||(a.w="u"in a?a.u%7:"W"in a?1:0),i="Z"in 
a?jy(Ry(a.y,0,1)).getUTCDay():Iy(Ry(a.y,0,1)).getDay(),a.m=0,a.d="W"in a?(a.w+6)%7+7*a.W-(i+5)%7:a.w+7*a.U-(i+6)%7);return"Z"in a?(a.H+=a.Z/100|0,a.M+=a.Z%100,jy(a)):Iy(a)}}function E(t,e,n,r){for(var i,a,o=0,s=e.length,c=n.length;o=c)return-1;if(37===(i=e.charCodeAt(o++))){if(i=e.charAt(o++),!(a=_[i in Hy?e.charAt(o++):i])||(r=a(t,n,r))<0)return-1}else if(i!=n.charCodeAt(r++))return-1}return r}return(b.x=k(n,b),b.X=k(r,b),b.c=k(e,b),x.x=k(n,x),x.X=k(r,x),x.c=k(e,x),{format:function(t){var e=k(t+="",b);return e.toString=function(){return t},e},parse:function(t){var e=w(t+="",!1);return e.toString=function(){return t},e},utcFormat:function(t){var e=k(t+="",x);return e.toString=function(){return t},e},utcParse:function(t){var e=w(t+="",!0);return e.toString=function(){return t},e}})}var zy,Uy,$y,Wy,Vy,Hy={"-":"",_:" ",0:"0"},Gy=/^\s*\d+/,qy=/^%/,Xy=/[\\^$*+?|[\]().{}]/g;function Zy(t,e,n){var r=t<0?"-":"",i=(r?-t:t)+"",a=i.length;return r+(a68?1900:2e3),n+r[0].length):-1}function sv(t,e,n){var r=/^(Z)|([+-]\d\d)(?::?(\d\d))?/.exec(e.slice(n,n+6));return r?(t.Z=r[1]?0:-(r[2]+(r[3]||"00")),n+r[0].length):-1}function cv(t,e,n){var r=Gy.exec(e.slice(n,n+1));return r?(t.q=3*r[0]-3,n+r[0].length):-1}function uv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.m=r[0]-1,n+r[0].length):-1}function lv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.d=+r[0],n+r[0].length):-1}function hv(t,e,n){var r=Gy.exec(e.slice(n,n+3));return r?(t.m=0,t.d=+r[0],n+r[0].length):-1}function fv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.H=+r[0],n+r[0].length):-1}function dv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.M=+r[0],n+r[0].length):-1}function pv(t,e,n){var r=Gy.exec(e.slice(n,n+2));return r?(t.S=+r[0],n+r[0].length):-1}function gv(t,e,n){var r=Gy.exec(e.slice(n,n+3));return r?(t.L=+r[0],n+r[0].length):-1}function yv(t,e,n){var r=Gy.exec(e.slice(n,n+6));return r?(t.L=Math.floor(r[0]/1e3),n+r[0].length):-1}function vv(t,e,n){var r=qy.exec(e.slice(n,n+1));return 
r?n+r[0].length:-1}function mv(t,e,n){var r=Gy.exec(e.slice(n));return r?(t.Q=+r[0],n+r[0].length):-1}function bv(t,e,n){var r=Gy.exec(e.slice(n));return r?(t.s=+r[0],n+r[0].length):-1}function xv(t,e){return Zy(t.getDate(),e,2)}function _v(t,e){return Zy(t.getHours(),e,2)}function kv(t,e){return Zy(t.getHours()%12||12,e,2)}function wv(t,e){return Zy(1+ny.count(Fg(t),t),e,3)}function Ev(t,e){return Zy(t.getMilliseconds(),e,3)}function Tv(t,e){return Ev(t,e)+"000"}function Cv(t,e){return Zy(t.getMonth()+1,e,2)}function Sv(t,e){return Zy(t.getMinutes(),e,2)}function Av(t,e){return Zy(t.getSeconds(),e,2)}function Mv(t){var e=t.getDay();return 0===e?7:e}function Ov(t,e){return Zy(zg.count(Fg(t)-1,t),e,2)}function Dv(t,e){var n=t.getDay();return t=n>=4||0===n?Vg(t):Vg.ceil(t),Zy(Vg.count(Fg(t),t)+(4===Fg(t).getDay()),e,2)}function Nv(t){return t.getDay()}function Bv(t,e){return Zy(Ug.count(Fg(t)-1,t),e,2)}function Lv(t,e){return Zy(t.getFullYear()%100,e,2)}function Fv(t,e){return Zy(t.getFullYear()%1e4,e,4)}function Pv(t){var e=t.getTimezoneOffset();return(e>0?"-":(e*=-1,"+"))+Zy(e/60|0,"0",2)+Zy(e%60,"0",2)}function Iv(t,e){return Zy(t.getUTCDate(),e,2)}function jv(t,e){return Zy(t.getUTCHours(),e,2)}function Rv(t,e){return Zy(t.getUTCHours()%12||12,e,2)}function Yv(t,e){return Zy(1+Ny.count(Fy(t),t),e,3)}function zv(t,e){return Zy(t.getUTCMilliseconds(),e,3)}function Uv(t,e){return zv(t,e)+"000"}function $v(t,e){return Zy(t.getUTCMonth()+1,e,2)}function Wv(t,e){return Zy(t.getUTCMinutes(),e,2)}function Vv(t,e){return Zy(t.getUTCSeconds(),e,2)}function Hv(t){var e=t.getUTCDay();return 0===e?7:e}function Gv(t,e){return Zy(vy.count(Fy(t)-1,t),e,2)}function qv(t,e){var n=t.getUTCDay();return t=n>=4||0===n?_y(t):_y.ceil(t),Zy(_y.count(Fy(t),t)+(4===Fy(t).getUTCDay()),e,2)}function Xv(t){return t.getUTCDay()}function Zv(t,e){return Zy(my.count(Fy(t)-1,t),e,2)}function Jv(t,e){return Zy(t.getUTCFullYear()%100,e,2)}function Qv(t,e){return 
Zy(t.getUTCFullYear()%1e4,e,4)}function Kv(){return"+0000"}function tm(){return"%"}function em(t){return+t}function nm(t){return Math.floor(+t/1e3)}function rm(t){return zy=Yy(t),Uy=zy.format,$y=zy.parse,Wy=zy.utcFormat,Vy=zy.utcParse,zy}rm({dateTime:"%x, %X",date:"%-m/%-d/%Y",time:"%-I:%M:%S %p",periods:["AM","PM"],days:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"],shortDays:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],months:["January","February","March","April","May","June","July","August","September","October","November","December"],shortMonths:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]});function im(t){return new Date(t)}function am(t){return t instanceof Date?+t:+new Date(+t)}function om(t,e,n,r,a,o,s,c,u){var l=ig(Jp,Jp),h=l.invert,f=l.domain,d=u(".%L"),p=u(":%S"),g=u("%I:%M"),y=u("%I %p"),v=u("%a %d"),m=u("%b %d"),b=u("%B"),x=u("%Y"),_=[[s,1,1e3],[s,5,5e3],[s,15,15e3],[s,30,3e4],[o,1,6e4],[o,5,3e5],[o,15,9e5],[o,30,18e5],[a,1,36e5],[a,3,108e5],[a,6,216e5],[a,12,432e5],[r,1,864e5],[r,2,1728e5],[n,1,6048e5],[e,1,2592e6],[e,3,7776e6],[t,1,31536e6]];function k(i){return(s(i)1)&&(t-=Math.floor(t));var e=Math.abs(t-.5);return qb.h=360*t-100,qb.s=1.5-1.5*e,qb.l=.8-.9*e,qb+""},Zb=Ge(),Jb=Math.PI/3,Qb=2*Math.PI/3,Kb=function(t){var e;return t=(.5-t)*Math.PI,Zb.r=255*(e=Math.sin(t))*e,Zb.g=255*(e=Math.sin(t+Jb))*e,Zb.b=255*(e=Math.sin(t+Qb))*e,Zb+""},tx=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", "+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"};function ex(t){var e=t.length;return function(n){return t[Math.max(0,Math.min(e-1,Math.floor(n*e)))]}}var 
nx=ex(Nm("44015444025645045745055946075a46085c460a5d460b5e470d60470e6147106347116447136548146748166848176948186a481a6c481b6d481c6e481d6f481f70482071482173482374482475482576482677482878482979472a7a472c7a472d7b472e7c472f7d46307e46327e46337f463480453581453781453882443983443a83443b84433d84433e85423f854240864241864142874144874045884046883f47883f48893e49893e4a893e4c8a3d4d8a3d4e8a3c4f8a3c508b3b518b3b528b3a538b3a548c39558c39568c38588c38598c375a8c375b8d365c8d365d8d355e8d355f8d34608d34618d33628d33638d32648e32658e31668e31678e31688e30698e306a8e2f6b8e2f6c8e2e6d8e2e6e8e2e6f8e2d708e2d718e2c718e2c728e2c738e2b748e2b758e2a768e2a778e2a788e29798e297a8e297b8e287c8e287d8e277e8e277f8e27808e26818e26828e26828e25838e25848e25858e24868e24878e23888e23898e238a8d228b8d228c8d228d8d218e8d218f8d21908d21918c20928c20928c20938c1f948c1f958b1f968b1f978b1f988b1f998a1f9a8a1e9b8a1e9c891e9d891f9e891f9f881fa0881fa1881fa1871fa28720a38620a48621a58521a68522a78522a88423a98324aa8325ab8225ac8226ad8127ad8128ae8029af7f2ab07f2cb17e2db27d2eb37c2fb47c31b57b32b67a34b67935b77937b87838b9773aba763bbb753dbc743fbc7340bd7242be7144bf7046c06f48c16e4ac16d4cc26c4ec36b50c46a52c56954c56856c66758c7655ac8645cc8635ec96260ca6063cb5f65cb5e67cc5c69cd5b6ccd5a6ece5870cf5773d05675d05477d1537ad1517cd2507fd34e81d34d84d44b86d54989d5488bd6468ed64590d74393d74195d84098d83e9bd93c9dd93ba0da39a2da37a5db36a8db34aadc32addc30b0dd2fb2dd2db5de2bb8de29bade28bddf26c0df25c2df23c5e021c8e020cae11fcde11dd0e11cd2e21bd5e21ad8e219dae319dde318dfe318e2e418e5e419e7e419eae51aece51befe51cf1e51df4e61ef6e620f8e621fbe723fde725")),rx=ex(Nm("00000401000501010601010802010902020b02020d03030f03031204041405041606051806051a07061c08071e0907200a08220b09240c09260d0a290e0b2b100b2d110c2f120d31130d34140e36150e38160f3b180f3d19103f1a10421c10441d11471e114920114b21114e22115024125325125527125829115a2a115c2c115f2d11612f116331116533106734106936106b38106c390f6e3b0f703d0f713f0f72400f74420f75440f764510774710784910784a10794c117a4e117b4f127b51127c52137c54137d56147d57157e59157e5a167e5c167f5d177f5f
187f601880621980641a80651a80671b80681c816a1c816b1d816d1d816e1e81701f81721f817320817521817621817822817922827b23827c23827e24828025828125818326818426818627818827818928818b29818c29818e2a81902a81912b81932b80942c80962c80982d80992d809b2e7f9c2e7f9e2f7fa02f7fa1307ea3307ea5317ea6317da8327daa337dab337cad347cae347bb0357bb2357bb3367ab5367ab73779b83779ba3878bc3978bd3977bf3a77c03a76c23b75c43c75c53c74c73d73c83e73ca3e72cc3f71cd4071cf4070d0416fd2426fd3436ed5446dd6456cd8456cd9466bdb476adc4869de4968df4a68e04c67e24d66e34e65e44f64e55064e75263e85362e95462ea5661eb5760ec5860ed5a5fee5b5eef5d5ef05f5ef1605df2625df2645cf3655cf4675cf4695cf56b5cf66c5cf66e5cf7705cf7725cf8745cf8765cf9785df9795df97b5dfa7d5efa7f5efa815ffb835ffb8560fb8761fc8961fc8a62fc8c63fc8e64fc9065fd9266fd9467fd9668fd9869fd9a6afd9b6bfe9d6cfe9f6dfea16efea36ffea571fea772fea973feaa74feac76feae77feb078feb27afeb47bfeb67cfeb77efeb97ffebb81febd82febf84fec185fec287fec488fec68afec88cfeca8dfecc8ffecd90fecf92fed194fed395fed597fed799fed89afdda9cfddc9efddea0fde0a1fde2a3fde3a5fde5a7fde7a9fde9aafdebacfcecaefceeb0fcf0b2fcf2b4fcf4b6fcf6b8fcf7b9fcf9bbfcfbbdfcfdbf")),ix=ex(Nm("00000401000501010601010802010a02020c02020e03021004031204031405041706041907051b08051d09061f0a07220b07240c08260d08290e092b10092d110a30120a32140b34150b37160b39180c3c190c3e1b0c411c0c431e0c451f0c48210c4a230c4c240c4f260c51280b53290b552b0b572d0b592f0a5b310a5c320a5e340a5f3609613809623909633b09643d09653e0966400a67420a68440a68450a69470b6a490b6a4a0c6b4c0c6b4d0d6c4f0d6c510e6c520e6d540f6d550f6d57106e59106e5a116e5c126e5d126e5f136e61136e62146e64156e65156e67166e69166e6a176e6c186e6d186e6f196e71196e721a6e741a6e751b6e771c6d781c6d7a1d6d7c1d6d7d1e6d7f1e6c801f6c82206c84206b85216b87216b88226a8a226a8c23698d23698f24699025689225689326679526679727669827669a28659b29649d29649f2a63a02a63a22b62a32c61a52c60a62d60a82e5fa92e5eab2f5ead305dae305cb0315bb1325ab3325ab43359b63458b73557b93556ba3655bc3754bd3853bf3952c03a51c13a50c33b4fc43c4ec63d4dc73e4cc83f4bca404acb4149cc4248ce4347cf4446d04545d24644d34743d44842d54a41d7
4b3fd84c3ed94d3dda4e3cdb503bdd513ade5238df5337e05536e15635e25734e35933e45a31e55c30e65d2fe75e2ee8602de9612bea632aeb6429eb6628ec6726ed6925ee6a24ef6c23ef6e21f06f20f1711ff1731df2741cf3761bf37819f47918f57b17f57d15f67e14f68013f78212f78410f8850ff8870ef8890cf98b0bf98c0af98e09fa9008fa9207fa9407fb9606fb9706fb9906fb9b06fb9d07fc9f07fca108fca309fca50afca60cfca80dfcaa0ffcac11fcae12fcb014fcb216fcb418fbb61afbb81dfbba1ffbbc21fbbe23fac026fac228fac42afac62df9c72ff9c932f9cb35f8cd37f8cf3af7d13df7d340f6d543f6d746f5d949f5db4cf4dd4ff4df53f4e156f3e35af3e55df2e661f2e865f2ea69f1ec6df1ed71f1ef75f1f179f2f27df2f482f3f586f3f68af4f88ef5f992f6fa96f8fb9af9fc9dfafda1fcffa4")),ax=ex(Nm("0d088710078813078916078a19068c1b068d1d068e20068f2206902406912605912805922a05932c05942e05952f059631059733059735049837049938049a3a049a3c049b3e049c3f049c41049d43039e44039e46039f48039f4903a04b03a14c02a14e02a25002a25102a35302a35502a45601a45801a45901a55b01a55c01a65e01a66001a66100a76300a76400a76600a76700a86900a86a00a86c00a86e00a86f00a87100a87201a87401a87501a87701a87801a87a02a87b02a87d03a87e03a88004a88104a78305a78405a78606a68707a68808a68a09a58b0aa58d0ba58e0ca48f0da4910ea3920fa39410a29511a19613a19814a099159f9a169f9c179e9d189d9e199da01a9ca11b9ba21d9aa31e9aa51f99a62098a72197a82296aa2395ab2494ac2694ad2793ae2892b02991b12a90b22b8fb32c8eb42e8db52f8cb6308bb7318ab83289ba3388bb3488bc3587bd3786be3885bf3984c03a83c13b82c23c81c33d80c43e7fc5407ec6417dc7427cc8437bc9447aca457acb4679cc4778cc4977cd4a76ce4b75cf4c74d04d73d14e72d24f71d35171d45270d5536fd5546ed6556dd7566cd8576bd9586ada5a6ada5b69db5c68dc5d67dd5e66de5f65de6164df6263e06363e16462e26561e26660e3685fe4695ee56a5de56b5de66c5ce76e5be76f5ae87059e97158e97257ea7457eb7556eb7655ec7754ed7953ed7a52ee7b51ef7c51ef7e50f07f4ff0804ef1814df1834cf2844bf3854bf3874af48849f48948f58b47f58c46f68d45f68f44f79044f79143f79342f89441f89540f9973ff9983ef99a3efa9b3dfa9c3cfa9e3bfb9f3afba139fba238fca338fca537fca636fca835fca934fdab33fdac33fdae32fdaf31fdb130fdb22ffdb42ffdb52efeb72dfeb82cfeba2cfebb2bfebd2afebe2afec029fdc229fd
c328fdc527fdc627fdc827fdca26fdcb26fccd25fcce25fcd025fcd225fbd324fbd524fbd724fad824fada24f9dc24f9dd25f8df25f8e125f7e225f7e425f6e626f6e826f5e926f5eb27f4ed27f3ee27f3f027f2f227f1f426f1f525f0f724f0f921")),ox=function(t){return ke(ne(t).call(document.documentElement))},sx=0;function cx(){return new ux}function ux(){this._="@"+(++sx).toString(36)}ux.prototype=cx.prototype={constructor:ux,get:function(t){for(var e=this._;!(e in t);)if(!(t=t.parentNode))return;return t[e]},set:function(t,e){return t[this._]=e},remove:function(t){return this._ in t&&delete t[this._]},toString:function(){return this._}};var lx=function(t){return"string"==typeof t?new be([document.querySelectorAll(t)],[document.documentElement]):new be([null==t?[]:t],me)},hx=function(t,e){null==e&&(e=Mn().touches);for(var n=0,r=e?e.length:0,i=new Array(r);n1?0:t<-1?xx:Math.acos(t)}function Ex(t){return t>=1?_x:t<=-1?-_x:Math.asin(t)}function Tx(t){return t.innerRadius}function Cx(t){return t.outerRadius}function Sx(t){return t.startAngle}function Ax(t){return t.endAngle}function Mx(t){return t&&t.padAngle}function Ox(t,e,n,r,i,a,o,s){var c=n-t,u=r-e,l=o-i,h=s-a,f=h*c-l*u;if(!(f*f<1e-12))return[t+(f=(l*(e-a)-h*(t-i))/f)*c,e+f*u]}function Dx(t,e,n,r,i,a,o){var s=t-n,c=e-r,u=(o?a:-a)/bx(s*s+c*c),l=u*c,h=-u*s,f=t+l,d=e+h,p=n+l,g=r+h,y=(f+p)/2,v=(d+g)/2,m=p-f,b=g-d,x=m*m+b*b,_=i-a,k=f*g-p*d,w=(b<0?-1:1)*bx(yx(0,_*_*x-k*k)),E=(k*b-m*w)/x,T=(-k*m-b*w)/x,C=(k*b+m*w)/x,S=(-k*m+b*w)/x,A=E-y,M=T-v,O=C-y,D=S-v;return A*A+M*M>O*O+D*D&&(E=C,T=S),{cx:E,cy:T,x01:-l,y01:-h,x11:E*(i/_-1),y11:T*(i/_-1)}}var Nx=function(){var t=Tx,e=Cx,n=fx(0),r=null,i=Sx,a=Ax,o=Mx,s=null;function c(){var c,u,l=+t.apply(this,arguments),h=+e.apply(this,arguments),f=i.apply(this,arguments)-_x,d=a.apply(this,arguments)-_x,p=dx(d-f),g=d>f;if(s||(s=c=Ui()),h1e-12)if(p>kx-1e-12)s.moveTo(h*gx(f),h*mx(f)),s.arc(0,0,h,f,d,!g),l>1e-12&&(s.moveTo(l*gx(d),l*mx(d)),s.arc(0,0,l,d,f,g));else{var 
y,v,m=f,b=d,x=f,_=d,k=p,w=p,E=o.apply(this,arguments)/2,T=E>1e-12&&(r?+r.apply(this,arguments):bx(l*l+h*h)),C=vx(dx(h-l)/2,+n.apply(this,arguments)),S=C,A=C;if(T>1e-12){var M=Ex(T/l*mx(E)),O=Ex(T/h*mx(E));(k-=2*M)>1e-12?(x+=M*=g?1:-1,_-=M):(k=0,x=_=(f+d)/2),(w-=2*O)>1e-12?(m+=O*=g?1:-1,b-=O):(w=0,m=b=(f+d)/2)}var D=h*gx(m),N=h*mx(m),B=l*gx(_),L=l*mx(_);if(C>1e-12){var F,P=h*gx(b),I=h*mx(b),j=l*gx(x),R=l*mx(x);if(p1e-12?A>1e-12?(y=Dx(j,R,D,N,h,A,g),v=Dx(P,I,B,L,h,A,g),s.moveTo(y.cx+y.x01,y.cy+y.y01),A1e-12&&k>1e-12?S>1e-12?(y=Dx(B,L,P,I,l,-S,g),v=Dx(D,N,j,R,l,-S,g),s.lineTo(y.cx+y.x01,y.cy+y.y01),S=l;--h)s.point(y[h],v[h]);s.lineEnd(),s.areaEnd()}g&&(y[u]=+t(f,u,c),v[u]=+n(f,u,c),s.point(e?+e(f,u,c):y[u],r?+r(f,u,c):v[u]))}if(d)return s=null,d+""||null}function u(){return Ix().defined(i).curve(o).context(a)}return c.x=function(n){return arguments.length?(t="function"==typeof n?n:fx(+n),e=null,c):t},c.x0=function(e){return arguments.length?(t="function"==typeof e?e:fx(+e),c):t},c.x1=function(t){return arguments.length?(e=null==t?null:"function"==typeof t?t:fx(+t),c):e},c.y=function(t){return arguments.length?(n="function"==typeof t?t:fx(+t),r=null,c):n},c.y0=function(t){return arguments.length?(n="function"==typeof t?t:fx(+t),c):n},c.y1=function(t){return arguments.length?(r=null==t?null:"function"==typeof t?t:fx(+t),c):r},c.lineX0=c.lineY0=function(){return u().x(t).y(n)},c.lineY1=function(){return u().x(t).y(r)},c.lineX1=function(){return u().x(e).y(n)},c.defined=function(t){return arguments.length?(i="function"==typeof t?t:fx(!!t),c):i},c.curve=function(t){return arguments.length?(o=t,null!=a&&(s=o(a)),c):o},c.context=function(t){return arguments.length?(null==t?a=s=null:s=o(a=t),c):a},c},Rx=function(t,e){return et?1:e>=t?0:NaN},Yx=function(t){return t},zx=function(){var t=Yx,e=Rx,n=null,r=fx(0),i=fx(kx),a=fx(0);function o(o){var s,c,u,l,h,f=o.length,d=0,p=new Array(f),g=new 
Array(f),y=+r.apply(this,arguments),v=Math.min(kx,Math.max(-kx,i.apply(this,arguments)-y)),m=Math.min(Math.abs(v)/f,a.apply(this,arguments)),b=m*(v<0?-1:1);for(s=0;s0&&(d+=h);for(null!=e?p.sort((function(t,n){return e(g[t],g[n])})):null!=n&&p.sort((function(t,e){return n(o[t],o[e])})),s=0,u=d?(v-f*b)/d:0;s0?h*u:0)+b,g[c]={data:o[c],index:s,value:h,startAngle:y,endAngle:l,padAngle:m};return g}return o.value=function(e){return arguments.length?(t="function"==typeof e?e:fx(+e),o):t},o.sortValues=function(t){return arguments.length?(e=t,n=null,o):e},o.sort=function(t){return arguments.length?(n=t,e=null,o):n},o.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:fx(+t),o):r},o.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:fx(+t),o):i},o.padAngle=function(t){return arguments.length?(a="function"==typeof t?t:fx(+t),o):a},o},Ux=Wx(Lx);function $x(t){this._curve=t}function Wx(t){function e(e){return new $x(t(e))}return e._curve=t,e}function Vx(t){var e=t.curve;return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t.curve=function(t){return arguments.length?e(Wx(t)):e()._curve},t}$x.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,e){this._curve.point(e*Math.sin(t),e*-Math.cos(t))}};var Hx=function(){return Vx(Ix().curve(Ux))},Gx=function(){var t=jx().curve(Ux),e=t.curve,n=t.lineX0,r=t.lineX1,i=t.lineY0,a=t.lineY1;return t.angle=t.x,delete t.x,t.startAngle=t.x0,delete t.x0,t.endAngle=t.x1,delete t.x1,t.radius=t.y,delete t.y,t.innerRadius=t.y0,delete t.y0,t.outerRadius=t.y1,delete t.y1,t.lineStartAngle=function(){return Vx(n())},delete t.lineX0,t.lineEndAngle=function(){return Vx(r())},delete t.lineX1,t.lineInnerRadius=function(){return Vx(i())},delete t.lineY0,t.lineOuterRadius=function(){return Vx(a())},delete t.lineY1,t.curve=function(t){return 
arguments.length?e(Wx(t)):e()._curve},t},qx=function(t,e){return[(e=+e)*Math.cos(t-=Math.PI/2),e*Math.sin(t)]},Xx=Array.prototype.slice;function Zx(t){return t.source}function Jx(t){return t.target}function Qx(t){var e=Zx,n=Jx,r=Fx,i=Px,a=null;function o(){var o,s=Xx.call(arguments),c=e.apply(this,s),u=n.apply(this,s);if(a||(a=o=Ui()),t(a,+r.apply(this,(s[0]=c,s)),+i.apply(this,s),+r.apply(this,(s[0]=u,s)),+i.apply(this,s)),o)return a=null,o+""||null}return o.source=function(t){return arguments.length?(e=t,o):e},o.target=function(t){return arguments.length?(n=t,o):n},o.x=function(t){return arguments.length?(r="function"==typeof t?t:fx(+t),o):r},o.y=function(t){return arguments.length?(i="function"==typeof t?t:fx(+t),o):i},o.context=function(t){return arguments.length?(a=null==t?null:t,o):a},o}function Kx(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e=(e+r)/2,n,e,i,r,i)}function t_(t,e,n,r,i){t.moveTo(e,n),t.bezierCurveTo(e,n=(n+i)/2,r,n,r,i)}function e_(t,e,n,r,i){var a=qx(e,n),o=qx(e,n=(n+i)/2),s=qx(r,n),c=qx(r,i);t.moveTo(a[0],a[1]),t.bezierCurveTo(o[0],o[1],s[0],s[1],c[0],c[1])}function n_(){return Qx(Kx)}function r_(){return Qx(t_)}function i_(){var t=Qx(e_);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t}var a_={draw:function(t,e){var n=Math.sqrt(e/xx);t.moveTo(n,0),t.arc(0,0,n,0,kx)}},o_={draw:function(t,e){var n=Math.sqrt(e/5)/2;t.moveTo(-3*n,-n),t.lineTo(-n,-n),t.lineTo(-n,-3*n),t.lineTo(n,-3*n),t.lineTo(n,-n),t.lineTo(3*n,-n),t.lineTo(3*n,n),t.lineTo(n,n),t.lineTo(n,3*n),t.lineTo(-n,3*n),t.lineTo(-n,n),t.lineTo(-3*n,n),t.closePath()}},s_=Math.sqrt(1/3),c_=2*s_,u_={draw:function(t,e){var n=Math.sqrt(e/c_),r=n*s_;t.moveTo(0,-n),t.lineTo(r,0),t.lineTo(0,n),t.lineTo(-r,0),t.closePath()}},l_=Math.sin(xx/10)/Math.sin(7*xx/10),h_=Math.sin(kx/10)*l_,f_=-Math.cos(kx/10)*l_,d_={draw:function(t,e){var n=Math.sqrt(.8908130915292852*e),r=h_*n,i=f_*n;t.moveTo(0,-n),t.lineTo(r,i);for(var a=1;a<5;++a){var 
o=kx*a/5,s=Math.cos(o),c=Math.sin(o);t.lineTo(c*n,-s*n),t.lineTo(s*r-c*i,c*r+s*i)}t.closePath()}},p_={draw:function(t,e){var n=Math.sqrt(e),r=-n/2;t.rect(r,r,n,n)}},g_=Math.sqrt(3),y_={draw:function(t,e){var n=-Math.sqrt(e/(3*g_));t.moveTo(0,2*n),t.lineTo(-g_*n,-n),t.lineTo(g_*n,-n),t.closePath()}},v_=Math.sqrt(3)/2,m_=1/Math.sqrt(12),b_=3*(m_/2+1),x_={draw:function(t,e){var n=Math.sqrt(e/b_),r=n/2,i=n*m_,a=r,o=n*m_+n,s=-a,c=o;t.moveTo(r,i),t.lineTo(a,o),t.lineTo(s,c),t.lineTo(-.5*r-v_*i,v_*r+-.5*i),t.lineTo(-.5*a-v_*o,v_*a+-.5*o),t.lineTo(-.5*s-v_*c,v_*s+-.5*c),t.lineTo(-.5*r+v_*i,-.5*i-v_*r),t.lineTo(-.5*a+v_*o,-.5*o-v_*a),t.lineTo(-.5*s+v_*c,-.5*c-v_*s),t.closePath()}},__=[a_,o_,u_,p_,d_,y_,x_],k_=function(){var t=fx(a_),e=fx(64),n=null;function r(){var r;if(n||(n=r=Ui()),t.apply(this,arguments).draw(n,+e.apply(this,arguments)),r)return n=null,r+""||null}return r.type=function(e){return arguments.length?(t="function"==typeof e?e:fx(e),r):t},r.size=function(t){return arguments.length?(e="function"==typeof t?t:fx(+t),r):e},r.context=function(t){return arguments.length?(n=null==t?null:t,r):n},r},w_=function(){};function E_(t,e,n){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+e)/6,(t._y0+4*t._y1+n)/6)}function T_(t){this._context=t}T_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:E_(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 
2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var C_=function(t){return new T_(t)};function S_(t){this._context=t}S_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x2=t,this._y2=e;break;case 1:this._point=2,this._x3=t,this._y3=e;break;case 2:this._point=3,this._x4=t,this._y4=e,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+e)/6);break;default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var A_=function(t){return new S_(t)};function M_(t){this._context=t}M_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var n=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+e)/6;this._line?this._context.lineTo(n,r):this._context.moveTo(n,r);break;case 3:this._point=4;default:E_(this,t,e)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=e}};var O_=function(t){return new M_(t)};function D_(t,e){this._basis=new 
T_(t),this._beta=e}D_.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,e=this._y,n=t.length-1;if(n>0)for(var r,i=t[0],a=e[0],o=t[n]-i,s=e[n]-a,c=-1;++c<=n;)r=c/n,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*o),this._beta*e[c]+(1-this._beta)*(a+r*s));this._x=this._y=null,this._basis.lineEnd()},point:function(t,e){this._x.push(+t),this._y.push(+e)}};var N_=function t(e){function n(t){return 1===e?new T_(t):new D_(t,e)}return n.beta=function(e){return t(+e)},n}(.85);function B_(t,e,n){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-e),t._y2+t._k*(t._y1-n),t._x2,t._y2)}function L_(t,e){this._context=t,this._k=(1-e)/6}L_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:B_(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2,this._x1=t,this._y1=e;break;case 2:this._point=3;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var F_=function t(e){function n(t){return new L_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function P_(t,e){this._context=t,this._k=(1-e)/6}P_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 
2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var I_=function t(e){function n(t){return new P_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function j_(t,e){this._context=t,this._k=(1-e)/6}j_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:B_(this,t,e)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var R_=function t(e){function n(t){return new j_(t,e)}return n.tension=function(e){return t(+e)},n}(0);function Y_(t,e,n){var r=t._x1,i=t._y1,a=t._x2,o=t._y2;if(t._l01_a>1e-12){var s=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*s-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*s-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>1e-12){var u=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,l=3*t._l23_a*(t._l23_a+t._l12_a);a=(a*u+t._x1*t._l23_2a-e*t._l12_2a)/l,o=(o*u+t._y1*t._l23_2a-n*t._l12_2a)/l}t._context.bezierCurveTo(r,i,a,o,t._x2,t._y2)}function 
z_(t,e){this._context=t,this._alpha=e}z_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;break;case 2:this._point=3;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var U_=function t(e){function n(t){return e?new z_(t,e):new L_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function $_(t,e){this._context=t,this._alpha=e}$_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=e;break;case 
1:this._point=2,this._context.moveTo(this._x4=t,this._y4=e);break;case 2:this._point=3,this._x5=t,this._y5=e;break;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var W_=function t(e){function n(t){return e?new $_(t,e):new P_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function V_(t,e){this._context=t,this._alpha=e}V_.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,e){if(t=+t,e=+e,this._point){var n=this._x2-t,r=this._y2-e;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(n*n+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Y_(this,t,e)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=e}};var H_=function t(e){function n(t){return e?new V_(t,e):new j_(t,0)}return n.alpha=function(e){return t(+e)},n}(.5);function G_(t){this._context=t}G_.prototype={areaStart:w_,areaEnd:w_,lineStart:function(){this._point=0},lineEnd:function(){this._point&&this._context.closePath()},point:function(t,e){t=+t,e=+e,this._point?this._context.lineTo(t,e):(this._point=1,this._context.moveTo(t,e))}};var q_=function(t){return new G_(t)};function X_(t){return t<0?-1:1}function Z_(t,e,n){var 
r=t._x1-t._x0,i=e-t._x1,a=(t._y1-t._y0)/(r||i<0&&-0),o=(n-t._y1)/(i||r<0&&-0),s=(a*i+o*r)/(r+i);return(X_(a)+X_(o))*Math.min(Math.abs(a),Math.abs(o),.5*Math.abs(s))||0}function J_(t,e){var n=t._x1-t._x0;return n?(3*(t._y1-t._y0)/n-e)/2:e}function Q_(t,e,n){var r=t._x0,i=t._y0,a=t._x1,o=t._y1,s=(a-r)/3;t._context.bezierCurveTo(r+s,i+s*e,a-s,o-s*n,a,o)}function K_(t){this._context=t}function tk(t){this._context=new ek(t)}function ek(t){this._context=t}function nk(t){return new K_(t)}function rk(t){return new tk(t)}function ik(t){this._context=t}function ak(t){var e,n,r=t.length-1,i=new Array(r),a=new Array(r),o=new Array(r);for(i[0]=0,a[0]=2,o[0]=t[0]+2*t[1],e=1;e=0;--e)i[e]=(o[e]-i[e+1])/a[e];for(a[r-1]=(t[r]+i[r-1])/2,e=0;e=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,e){switch(t=+t,e=+e,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,e):this._context.moveTo(t,e);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,e),this._context.lineTo(t,e);else{var n=this._x*(1-this._t)+t*this._t;this._context.lineTo(n,this._y),this._context.lineTo(n,e)}}this._x=t,this._y=e}};var ck=function(t){return new sk(t,.5)};function uk(t){return new sk(t,0)}function lk(t){return new sk(t,1)}var hk=function(t,e){if((i=t.length)>1)for(var n,r,i,a=1,o=t[e[0]],s=o.length;a=0;)n[e]=e;return n};function dk(t,e){return t[e]}var pk=function(){var t=fx([]),e=fk,n=hk,r=dk;function i(i){var a,o,s=t.apply(this,arguments),c=i.length,u=s.length,l=new Array(u);for(a=0;a0){for(var n,r,i,a=0,o=t[0].length;a0)for(var n,r,i,a,o,s,c=0,u=t[e[0]].length;c0?(r[0]=a,r[1]=a+=i):i<0?(r[1]=o,r[0]=o+=i):(r[0]=0,r[1]=i)},vk=function(t,e){if((n=t.length)>0){for(var n,r=0,i=t[e[0]],a=i.length;r0&&(r=(n=t[e[0]]).length)>0){for(var n,r,i,a=0,o=1;oa&&(a=e,r=n);return r}var _k=function(t){var e=t.map(kk);return fk(t).sort((function(t,n){return e[t]-e[n]}))};function kk(t){for(var 
e,n=0,r=-1,i=t.length;++r0)){if(a/=f,f<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=r-c,f||!(a<0)){if(a/=f,f<0){if(a>h)return;a>l&&(l=a)}else if(f>0){if(a0)){if(a/=d,d<0){if(a0){if(a>h)return;a>l&&(l=a)}if(a=i-u,d||!(a<0)){if(a/=d,d<0){if(a>h)return;a>l&&(l=a)}else if(d>0){if(a0||h<1)||(l>0&&(t[0]=[c+l*f,u+l*d]),h<1&&(t[1]=[c+h*f,u+h*d]),!0)}}}}}function Uk(t,e,n,r,i){var a=t[1];if(a)return!0;var o,s,c=t[0],u=t.left,l=t.right,h=u[0],f=u[1],d=l[0],p=l[1],g=(h+d)/2,y=(f+p)/2;if(p===f){if(g=r)return;if(h>d){if(c){if(c[1]>=i)return}else c=[g,n];a=[g,i]}else{if(c){if(c[1]1)if(h>d){if(c){if(c[1]>=i)return}else c=[(n-s)/o,n];a=[(i-s)/o,i]}else{if(c){if(c[1]=r)return}else c=[e,o*e+s];a=[r,o*r+s]}else{if(c){if(c[0]=-lw)){var d=c*c+u*u,p=l*l+h*h,g=(h*d-u*p)/f,y=(c*p-l*d)/f,v=Gk.pop()||new qk;v.arc=t,v.site=i,v.x=g+o,v.y=(v.cy=y+s)+Math.sqrt(g*g+y*y),t.circle=v;for(var m=null,b=sw._;b;)if(v.yuw)s=s.L;else{if(!((i=a-iw(s,o))>uw)){r>-uw?(e=s.P,n=s):i>-uw?(e=s,n=s.N):e=n=s;break}if(!s.R){e=s;break}s=s.R}!function(t){ow[t.index]={site:t,halfedges:[]}}(t);var c=Kk(t);if(aw.insert(e,c),e||n){if(e===n)return Zk(e),n=Kk(e.site),aw.insert(c,n),c.edge=n.edge=jk(e.site,c.site),Xk(e),void Xk(n);if(n){Zk(e),Zk(n);var u=e.site,l=u[0],h=u[1],f=t[0]-l,d=t[1]-h,p=n.site,g=p[0]-l,y=p[1]-h,v=2*(f*y-d*g),m=f*f+d*d,b=g*g+y*y,x=[(y*m-d*b)/v+l,(f*b-g*m)/v+h];Yk(n.edge,u,p,x),c.edge=jk(u,t,null,x),n.edge=jk(t,p,null,x),Xk(e),Xk(n)}else c.edge=jk(e.site,c.site)}}function rw(t,e){var n=t.site,r=n[0],i=n[1],a=i-e;if(!a)return r;var o=t.P;if(!o)return-1/0;var s=(n=o.site)[0],c=n[1],u=c-e;if(!u)return s;var l=s-r,h=1/a-1/u,f=l/u;return h?(-f+Math.sqrt(f*f-2*h*(l*l/(-2*u)-c+u/2+i-a/2)))/h+r:(r+s)/2}function iw(t,e){var n=t.N;if(n)return rw(n,e);var r=t.site;return r[1]===e?r[0]:1/0}var aw,ow,sw,cw,uw=1e-6,lw=1e-12;function hw(t,e){return e[1]-t[1]||e[0]-t[0]}function fw(t,e){var n,r,i,a=t.sort(hw).pop();for(cw=[],ow=new Array(t.length),aw=new Ik,sw=new 
Ik;;)if(i=Hk,a&&(!i||a[1]uw||Math.abs(i[0][1]-i[1][1])>uw)||delete cw[a]}(o,s,c,u),function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y=ow.length,v=!0;for(i=0;iuw||Math.abs(g-f)>uw)&&(c.splice(s,0,cw.push(Rk(o,d,Math.abs(p-t)uw?[t,Math.abs(h-t)uw?[Math.abs(f-r)uw?[n,Math.abs(h-n)uw?[Math.abs(f-e)=s)return null;var c=t-i.site[0],u=e-i.site[1],l=c*c+u*u;do{i=a.cells[r=o],o=null,i.halfedges.forEach((function(n){var r=a.edges[n],s=r.left;if(s!==i.site&&s||(s=r.right)){var c=t-s[0],u=e-s[1],h=c*c+u*u;hr?(r+i)/2:Math.min(0,r)||Math.max(0,i),o>a?(a+o)/2:Math.min(0,a)||Math.max(0,o))}var Sw=function(){var t,e,n=_w,r=kw,i=Cw,a=Ew,o=Tw,s=[0,1/0],c=[[-1/0,-1/0],[1/0,1/0]],u=250,l=fp,h=lt("start","zoom","end"),f=0;function d(t){t.property("__zoom",ww).on("wheel.zoom",x).on("mousedown.zoom",_).on("dblclick.zoom",k).filter(o).on("touchstart.zoom",w).on("touchmove.zoom",E).on("touchend.zoom touchcancel.zoom",T).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function p(t,e){return(e=Math.max(s[0],Math.min(s[1],e)))===t.k?t:new yw(e,t.x,t.y)}function g(t,e,n){var r=e[0]-n[0]*t.k,i=e[1]-n[1]*t.k;return r===t.x&&i===t.y?t:new yw(t.k,r,i)}function y(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function v(t,e,n){t.on("start.zoom",(function(){m(this,arguments).start()})).on("interrupt.zoom end.zoom",(function(){m(this,arguments).end()})).tween("zoom",(function(){var t=this,i=arguments,a=m(t,i),o=r.apply(t,i),s=null==n?y(o):"function"==typeof n?n.apply(t,i):n,c=Math.max(o[1][0]-o[0][0],o[1][1]-o[0][1]),u=t.__zoom,h="function"==typeof e?e.apply(t,i):e,f=l(u.invert(s).concat(c/u.k),h.invert(s).concat(c/h.k));return function(t){if(1===t)t=h;else{var e=f(t),n=c/e[2];t=new yw(n,s[0]-e[0]*n,s[1]-e[1]*n)}a.zoom(null,t)}}))}function m(t,e,n){return!n&&t.__zooming||new b(t,e)}function b(t,e){this.that=t,this.args=e,this.active=0,this.extent=r.apply(t,e),this.taps=0}function x(){if(n.apply(this,arguments)){var 
t=m(this,arguments),e=this.__zoom,r=Math.max(s[0],Math.min(s[1],e.k*Math.pow(2,a.apply(this,arguments)))),o=Nn(this);if(t.wheel)t.mouse[0][0]===o[0]&&t.mouse[0][1]===o[1]||(t.mouse[1]=e.invert(t.mouse[0]=o)),clearTimeout(t.wheel);else{if(e.k===r)return;t.mouse=[o,e.invert(o)],or(this),t.start()}xw(),t.wheel=setTimeout(u,150),t.zoom("mouse",i(g(p(e,r),t.mouse[0],t.mouse[1]),t.extent,c))}function u(){t.wheel=null,t.end()}}function _(){if(!e&&n.apply(this,arguments)){var t=m(this,arguments,!0),r=ke(ce.view).on("mousemove.zoom",u,!0).on("mouseup.zoom",l,!0),a=Nn(this),o=ce.clientX,s=ce.clientY;Te(ce.view),bw(),t.mouse=[a,this.__zoom.invert(a)],or(this),t.start()}function u(){if(xw(),!t.moved){var e=ce.clientX-o,n=ce.clientY-s;t.moved=e*e+n*n>f}t.zoom("mouse",i(g(t.that.__zoom,t.mouse[0]=Nn(t.that),t.mouse[1]),t.extent,c))}function l(){r.on("mousemove.zoom mouseup.zoom",null),Ce(ce.view,t.moved),xw(),t.end()}}function k(){if(n.apply(this,arguments)){var t=this.__zoom,e=Nn(this),a=t.invert(e),o=t.k*(ce.shiftKey?.5:2),s=i(g(p(t,o),e,a),r.apply(this,arguments),c);xw(),u>0?ke(this).transition().duration(u).call(v,s,e):ke(this).call(d.transform,s)}}function w(){if(n.apply(this,arguments)){var e,r,i,a,o=ce.touches,s=o.length,c=m(this,arguments,ce.changedTouches.length===s);for(bw(),r=0;rh&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 
2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},M={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),56;case 1:return this.begin("type_directive"),57;case 2:return this.popState(),this.begin("arg_directive"),14;case 3:return this.popState(),this.popState(),59;case 4:return 58;case 5:return 5;case 6:case 7:case 8:case 9:case 10:break;case 11:return this.begin("ID"),16;case 12:return e.yytext=e.yytext.trim(),this.begin("ALIAS"),48;case 13:return this.popState(),this.popState(),this.begin("LINE"),18;case 14:return this.popState(),this.popState(),5;case 15:return this.begin("LINE"),27;case 16:return this.begin("LINE"),29;case 17:return this.begin("LINE"),30;case 18:return this.begin("LINE"),31;case 19:return this.begin("LINE"),36;case 20:return this.begin("LINE"),33;case 21:return this.begin("LINE"),35;case 22:return this.popState(),19;case 23:return 28;case 24:return 43;case 25:return 44;case 26:return 39;case 27:return 37;case 28:return this.begin("ID"),22;case 29:return this.begin("ID"),23;case 30:return 25;case 31:return 7;case 32:return 21;case 33:return 42;case 34:return 5;case 35:return e.yytext=e.yytext.trim(),48;case 36:return 51;case 37:return 52;case 38:return 49;case 39:return 50;case 40:return 53;case 41:return 54;case 42:return 55;case 
43:return 46;case 44:return 47;case 45:return 5;case 46:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:participant\b)/i,/^(?:[^\->:\n,;]+?(?=((?!\n)\s)+as(?!\n)\s|[#\n;]|$))/i,/^(?:as\b)/i,/^(?:(?:))/i,/^(?:loop\b)/i,/^(?:rect\b)/i,/^(?:opt\b)/i,/^(?:alt\b)/i,/^(?:else\b)/i,/^(?:par\b)/i,/^(?:and\b)/i,/^(?:(?:[:]?(?:no)?wrap)?[^#\n;]*)/i,/^(?:end\b)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:over\b)/i,/^(?:note\b)/i,/^(?:activate\b)/i,/^(?:deactivate\b)/i,/^(?:title\b)/i,/^(?:sequenceDiagram\b)/i,/^(?:autonumber\b)/i,/^(?:,)/i,/^(?:;)/i,/^(?:[^\+\->:\n,;]+((?!(-x|--x))[\-]*[^\+\->:\n,;]+)*)/i,/^(?:->>)/i,/^(?:-->>)/i,/^(?:->)/i,/^(?:-->)/i,/^(?:-[x])/i,/^(?:--[x])/i,/^(?::(?:(?:no)?wrap)?[^#\n;]+)/i,/^(?:\+)/i,/^(?:-)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1,8],inclusive:!1},type_directive:{rules:[2,3,8],inclusive:!1},arg_directive:{rules:[3,4,8],inclusive:!1},ID:{rules:[7,8,12],inclusive:!1},ALIAS:{rules:[7,8,13,14],inclusive:!1},LINE:{rules:[7,8,22],inclusive:!1},INITIAL:{rules:[0,5,6,8,9,10,11,15,16,17,18,19,20,21,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46],inclusive:!0}}};function O(){this.yy={}}return A.lexer=M,O.prototype=A,A.Parser=O,new O}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){var r=n(198);t.exports={Graph:r.Graph,json:n(301),alg:n(302),version:r.version}},function(t,e,n){var 
r;try{r={cloneDeep:n(313),constant:n(86),defaults:n(154),each:n(87),filter:n(128),find:n(314),flatten:n(156),forEach:n(126),forIn:n(319),has:n(93),isUndefined:n(139),last:n(320),map:n(140),mapValues:n(321),max:n(322),merge:n(324),min:n(329),minBy:n(330),now:n(331),pick:n(161),range:n(162),reduce:n(142),sortBy:n(338),uniqueId:n(163),values:n(147),zipObject:n(343)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){var n=Array.isArray;t.exports=n},function(t,e,n){ -/** - * @license - * Copyright (c) 2012-2013 Chris Pettitt - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ -t.exports={graphlib:n(311),dagre:n(153),intersect:n(368),render:n(370),util:n(12),version:n(382)}},function(t,e){t.exports=function(t){return t.webpackPolyfill||(t.deprecate=function(){},t.paths=[],t.children||(t.children=[]),Object.defineProperty(t,"loaded",{enumerable:!0,get:function(){return t.l}}),Object.defineProperty(t,"id",{enumerable:!0,get:function(){return t.i}}),t.webpackPolyfill=1),t}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph;function a(t,e,n,i){var a;do{a=r.uniqueId(i)}while(t.hasNode(a));return n.dummy=e,t.setNode(a,n),a}function o(t){return r.max(r.map(t.nodes(),(function(e){var n=t.node(e).rank;if(!r.isUndefined(n))return n})))}t.exports={addDummyNode:a,simplify:function(t){var e=(new i).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){var r=e.edge(n.v,n.w)||{weight:0,minlen:1},i=t.edge(n);e.setEdge(n.v,n.w,{weight:r.weight+i.weight,minlen:Math.max(r.minlen,i.minlen)})})),e},asNonCompoundGraph:function(t){var e=new i({multigraph:t.isMultigraph()}).setGraph(t.graph());return r.forEach(t.nodes(),(function(n){t.children(n).length||e.setNode(n,t.node(n))})),r.forEach(t.edges(),(function(n){e.setEdge(n,t.edge(n))})),e},successorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.outEdges(e),(function(e){n[e.w]=(n[e.w]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},predecessorWeights:function(t){var e=r.map(t.nodes(),(function(e){var n={};return r.forEach(t.inEdges(e),(function(e){n[e.v]=(n[e.v]||0)+t.edge(e).weight})),n}));return r.zipObject(t.nodes(),e)},intersectRect:function(t,e){var n,r,i=t.x,a=t.y,o=e.x-i,s=e.y-a,c=t.width/2,u=t.height/2;if(!o&&!s)throw new Error("Not possible to find intersection inside of the rectangle");Math.abs(s)*c>Math.abs(o)*u?(s<0&&(u=-u),n=u*o/s,r=u):(o<0&&(c=-c),n=c,r=c*s/o);return{x:i+n,y:a+r}},buildLayerMatrix:function(t){var e=r.map(r.range(o(t)+1),(function(){return[]}));return 
r.forEach(t.nodes(),(function(n){var i=t.node(n),a=i.rank;r.isUndefined(a)||(e[a][i.order]=n)})),e},normalizeRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank})));r.forEach(t.nodes(),(function(n){var i=t.node(n);r.has(i,"rank")&&(i.rank-=e)}))},removeEmptyRanks:function(t){var e=r.min(r.map(t.nodes(),(function(e){return t.node(e).rank}))),n=[];r.forEach(t.nodes(),(function(r){var i=t.node(r).rank-e;n[i]||(n[i]=[]),n[i].push(r)}));var i=0,a=t.graph().nodeRankFactor;r.forEach(n,(function(e,n){r.isUndefined(e)&&n%a!=0?--i:i&&r.forEach(e,(function(e){t.node(e).rank+=i}))}))},addBorderNode:function(t,e,n,r){var i={width:0,height:0};arguments.length>=4&&(i.rank=n,i.order=r);return a(t,"border",i,e)},maxRank:o,partition:function(t,e){var n={lhs:[],rhs:[]};return r.forEach(t,(function(t){e(t)?n.lhs.push(t):n.rhs.push(t)})),n},time:function(t,e){var n=r.now();try{return e()}finally{console.log(t+" time: "+(r.now()-n)+"ms")}},notime:function(t,e){return e()}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(173),i=n(174),a=n(175),o={channel:r.default,lang:i.default,unit:a.default};e.default=o},function(t,e,n){var r;try{r={clone:n(199),constant:n(86),each:n(87),filter:n(128),has:n(93),isArray:n(5),isEmpty:n(276),isFunction:n(37),isUndefined:n(139),keys:n(30),map:n(140),reduce:n(142),size:n(279),transform:n(285),union:n(286),values:n(147)}}catch(t){}r||(r=window._),t.exports=r},function(t,e){t.exports=function(t){var e=typeof t;return null!=t&&("object"==e||"function"==e)}},function(t,e,n){var r=n(43);t.exports={isSubgraph:function(t,e){return!!t.children(e).length},edgeToId:function(t){return a(t.v)+":"+a(t.w)+":"+a(t.name)},applyStyle:function(t,e){e&&t.attr("style",e)},applyClass:function(t,e,n){e&&t.attr("class",e).attr("class",n+" "+t.attr("class"))},applyTransition:function(t,e){var n=e.graph();if(r.isPlainObject(n)){var i=n.transition;if(r.isFunction(i))return i(t)}return t}};var i=/:/g;function 
a(t){return t?String(t).replace(i,"\\:"):""}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,7],n=[1,6],r=[1,14],i=[1,25],a=[1,28],o=[1,26],s=[1,27],c=[1,29],u=[1,30],l=[1,31],h=[1,33],f=[1,34],d=[1,35],p=[10,19],g=[1,47],y=[1,48],v=[1,49],m=[1,50],b=[1,51],x=[1,52],_=[10,19,25,32,33,41,44,45,46,47,48,49],k=[10,19,23,25,32,33,37,41,44,45,46,47,48,49,66,67,68],w=[10,13,17,19],E=[41,66,67,68],T=[41,48,49,66,67,68],C=[41,44,45,46,47,66,67,68],S=[10,19,25],A=[1,81],M={trace:function(){},yy:{},symbols_:{error:2,start:3,mermaidDoc:4,directive:5,graphConfig:6,openDirective:7,typeDirective:8,closeDirective:9,NEWLINE:10,":":11,argDirective:12,open_directive:13,type_directive:14,arg_directive:15,close_directive:16,CLASS_DIAGRAM:17,statements:18,EOF:19,statement:20,className:21,alphaNumToken:22,GENERICTYPE:23,relationStatement:24,LABEL:25,classStatement:26,methodStatement:27,annotationStatement:28,clickStatement:29,cssClassStatement:30,CLASS:31,STYLE_SEPARATOR:32,STRUCT_START:33,members:34,STRUCT_STOP:35,ANNOTATION_START:36,ANNOTATION_END:37,MEMBER:38,SEPARATOR:39,relation:40,STR:41,relationType:42,lineType:43,AGGREGATION:44,EXTENSION:45,COMPOSITION:46,DEPENDENCY:47,LINE:48,DOTTED_LINE:49,CALLBACK:50,LINK:51,CSSCLASS:52,commentToken:53,textToken:54,graphCodeTokens:55,textNoTagsToken:56,TAGSTART:57,TAGEND:58,"==":59,"--":60,PCT:61,DEFAULT:62,SPACE:63,MINUS:64,keywords:65,UNICODE_TEXT:66,NUM:67,ALPHA:68,$accept:0,$end:1},terminals_:{2:"error",10:"NEWLINE",11:":",13:"open_directive",14:"type_directive",15:"arg_directive",16:"close_directive",17:"CLASS_DIAGRAM",19:"EOF",23:"GENERICTYPE",25:"LABEL",31:"CLASS",32:"STYLE_SEPARATOR",33:"STRUCT_START",35:"STRUCT_STOP",36:"ANNOTATION_START",37:"ANNOTATION_END",38:"MEMBER",39:"SEPARATOR",41:"STR",44:"AGGREGATION",45:"EXTENSION",46:"COMPOSITION",47:"DEPENDENCY",48:"LINE",49:"DOTTED_LINE",50:"CALLBACK",51:"LINK",52:"CSSCLASS",55:"graphCodeTokens",57:"TAGSTART",58
:"TAGEND",59:"==",60:"--",61:"PCT",62:"DEFAULT",63:"SPACE",64:"MINUS",65:"keywords",66:"UNICODE_TEXT",67:"NUM",68:"ALPHA"},productions_:[0,[3,1],[3,2],[4,1],[5,4],[5,6],[7,1],[8,1],[12,1],[9,1],[6,4],[18,1],[18,2],[18,3],[21,1],[21,2],[21,3],[21,2],[20,1],[20,2],[20,1],[20,1],[20,1],[20,1],[20,1],[20,1],[26,2],[26,4],[26,5],[26,7],[28,4],[34,1],[34,2],[27,1],[27,2],[27,1],[27,1],[24,3],[24,4],[24,4],[24,5],[40,3],[40,2],[40,2],[40,1],[42,1],[42,1],[42,1],[42,1],[43,1],[43,1],[29,3],[29,4],[29,3],[29,4],[30,3],[53,1],[53,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[54,1],[56,1],[56,1],[56,1],[56,1],[22,1],[22,1],[22,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:r.parseDirective("%%{","open_directive");break;case 7:r.parseDirective(a[s],"type_directive");break;case 8:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 9:r.parseDirective("}%%","close_directive","class");break;case 14:this.$=a[s];break;case 15:this.$=a[s-1]+a[s];break;case 16:this.$=a[s-2]+"~"+a[s-1]+a[s];break;case 17:this.$=a[s-1]+"~"+a[s];break;case 18:r.addRelation(a[s]);break;case 19:a[s-1].title=r.cleanupLabel(a[s]),r.addRelation(a[s-1]);break;case 26:r.addClass(a[s]);break;case 27:r.addClass(a[s-2]),r.setCssClass(a[s-2],a[s]);break;case 28:r.addClass(a[s-3]),r.addMembers(a[s-3],a[s-1]);break;case 29:r.addClass(a[s-5]),r.setCssClass(a[s-5],a[s-3]),r.addMembers(a[s-5],a[s-1]);break;case 30:r.addAnnotation(a[s],a[s-2]);break;case 31:this.$=[a[s]];break;case 32:a[s].push(a[s-1]),this.$=a[s];break;case 33:break;case 34:r.addMember(a[s-1],r.cleanupLabel(a[s]));break;case 35:case 36:break;case 37:this.$={id1:a[s-2],id2:a[s],relation:a[s-1],relationTitle1:"none",relationTitle2:"none"};break;case 38:this.$={id1:a[s-3],id2:a[s],relation:a[s-1],relationTitle1:a[s-2],relationTitle2:"none"};break;case 39:this.$={id1:a[s-3],id2:a[s],relation:a[s-2],relationTitle1:"none",relationTitle2:a[s-1]};break;case 
40:this.$={id1:a[s-4],id2:a[s],relation:a[s-2],relationTitle1:a[s-3],relationTitle2:a[s-1]};break;case 41:this.$={type1:a[s-2],type2:a[s],lineType:a[s-1]};break;case 42:this.$={type1:"none",type2:a[s],lineType:a[s-1]};break;case 43:this.$={type1:a[s-1],type2:"none",lineType:a[s]};break;case 44:this.$={type1:"none",type2:"none",lineType:a[s]};break;case 45:this.$=r.relationType.AGGREGATION;break;case 46:this.$=r.relationType.EXTENSION;break;case 47:this.$=r.relationType.COMPOSITION;break;case 48:this.$=r.relationType.DEPENDENCY;break;case 49:this.$=r.lineType.LINE;break;case 50:this.$=r.lineType.DOTTED_LINE;break;case 51:this.$=a[s-2],r.setClickEvent(a[s-1],a[s],void 0);break;case 52:this.$=a[s-3],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 53:this.$=a[s-2],r.setLink(a[s-1],a[s],void 0);break;case 54:this.$=a[s-3],r.setLink(a[s-2],a[s-1],a[s]);break;case 55:r.setCssClass(a[s-1],a[s])}},table:[{3:1,4:2,5:3,6:4,7:5,13:e,17:n},{1:[3]},{1:[2,1]},{3:8,4:2,5:3,6:4,7:5,13:e,17:n},{1:[2,3]},{8:9,14:[1,10]},{10:[1,11]},{14:[2,6]},{1:[2,2]},{9:12,11:[1,13],16:r},t([11,16],[2,7]),{5:23,7:5,13:e,18:15,20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},{10:[1,36]},{12:37,15:[1,38]},{10:[2,9]},{19:[1,39]},{10:[1,40],19:[2,11]},t(p,[2,18],{25:[1,41]}),t(p,[2,20]),t(p,[2,21]),t(p,[2,22]),t(p,[2,23]),t(p,[2,24]),t(p,[2,25]),t(p,[2,33],{40:42,42:45,43:46,25:[1,44],41:[1,43],44:g,45:y,46:v,47:m,48:b,49:x}),{21:53,22:32,66:h,67:f,68:d},t(p,[2,35]),t(p,[2,36]),{22:54,66:h,67:f,68:d},{21:55,22:32,66:h,67:f,68:d},{21:56,22:32,66:h,67:f,68:d},{41:[1,57]},t(_,[2,14],{22:32,21:58,23:[1,59],66:h,67:f,68:d}),t(k,[2,69]),t(k,[2,70]),t(k,[2,71]),t(w,[2,4]),{9:60,16:r},{16:[2,8]},{1:[2,10]},{5:23,7:5,13:e,18:61,19:[2,12],20:16,21:24,22:32,24:17,26:18,27:19,28:20,29:21,30:22,31:i,36:a,38:o,39:s,50:c,51:u,52:l,66:h,67:f,68:d},t(p,[2,19]),{21:62,22:32,41:[1,63],66:h,67:f,68:d},{40:64,42:45,43:46,44:g,45:y,46:v,47:m,48:b,49:x},t(p,[2,34]),{43:65
,48:b,49:x},t(E,[2,44],{42:66,44:g,45:y,46:v,47:m}),t(T,[2,45]),t(T,[2,46]),t(T,[2,47]),t(T,[2,48]),t(C,[2,49]),t(C,[2,50]),t(p,[2,26],{32:[1,67],33:[1,68]}),{37:[1,69]},{41:[1,70]},{41:[1,71]},{22:72,66:h,67:f,68:d},t(_,[2,15]),t(_,[2,17],{22:32,21:73,66:h,67:f,68:d}),{10:[1,74]},{19:[2,13]},t(S,[2,37]),{21:75,22:32,66:h,67:f,68:d},{21:76,22:32,41:[1,77],66:h,67:f,68:d},t(E,[2,43],{42:78,44:g,45:y,46:v,47:m}),t(E,[2,42]),{22:79,66:h,67:f,68:d},{34:80,38:A},{21:82,22:32,66:h,67:f,68:d},t(p,[2,51],{41:[1,83]}),t(p,[2,53],{41:[1,84]}),t(p,[2,55]),t(_,[2,16]),t(w,[2,5]),t(S,[2,39]),t(S,[2,38]),{21:85,22:32,66:h,67:f,68:d},t(E,[2,41]),t(p,[2,27],{33:[1,86]}),{35:[1,87]},{34:88,35:[2,31],38:A},t(p,[2,30]),t(p,[2,52]),t(p,[2,54]),t(S,[2,40]),{34:89,38:A},t(p,[2,28]),{35:[2,32]},{35:[1,90]},t(p,[2,29])],defaultActions:{2:[2,1],4:[2,3],7:[2,6],8:[2,2],14:[2,9],38:[2,8],39:[2,10],61:[2,13],88:[2,32]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got 
'"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},O={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),13;case 1:return this.begin("type_directive"),14;case 2:return this.popState(),this.begin("arg_directive"),11;case 3:return this.popState(),this.popState(),16;case 4:return 15;case 5:case 6:break;case 7:return 10;case 8:break;case 9:case 10:return 17;case 11:return this.begin("struct"),33;case 12:return"EOF_IN_STRUCT";case 13:return"OPEN_IN_STRUCT";case 14:return this.popState(),35;case 15:break;case 16:return"MEMBER";case 17:return 31;case 18:return 52;case 19:return 50;case 20:return 51;case 21:return 36;case 22:return 37;case 23:this.begin("generic");break;case 24:this.popState();break;case 25:return"GENERICTYPE";case 26:this.begin("string");break;case 27:this.popState();break;case 28:return"STR";case 29:case 30:return 45;case 31:case 32:return 47;case 33:return 46;case 34:return 44;case 35:return 48;case 36:return 49;case 37:return 25;case 38:return 32;case 39:return 64;case 40:return"DOT";case 41:return"PLUS";case 42:return 61;case 43:case 44:return"EQUALS";case 45:return 68;case 46:return"PUNCTUATION";case 47:return 67;case 48:return 66;case 49:return 63;case 50:return 
19}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)*[^\n]*(\r?\n?)+)/,/^(?:%%[^\n]*(\r?\n)*)/,/^(?:(\r?\n)+)/,/^(?:\s+)/,/^(?:classDiagram-v2\b)/,/^(?:classDiagram\b)/,/^(?:[{])/,/^(?:$)/,/^(?:[{])/,/^(?:[}])/,/^(?:[\n])/,/^(?:[^{}\n]*)/,/^(?:class\b)/,/^(?:cssClass\b)/,/^(?:callback\b)/,/^(?:link\b)/,/^(?:<<)/,/^(?:>>)/,/^(?:[~])/,/^(?:[~])/,/^(?:[^~]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:\s*<\|)/,/^(?:\s*\|>)/,/^(?:\s*>)/,/^(?:\s*<)/,/^(?:\s*\*)/,/^(?:\s*o\b)/,/^(?:--)/,/^(?:\.\.)/,/^(?::{1}[^:\n;]+)/,/^(?::{3})/,/^(?:-)/,/^(?:\.)/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:\w+)/,/^(?:[!"#$%&'*+,-.`?\\/])/,/^(?:[0-9]+)/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C
92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u12C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u309
6\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF900-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\s)/,/^(?:$)/],conditions:{string:{rules:[27,28],inclusive:!1},generic:{rules:[24,25],inclusive:!1},struct:{rules:[12,13,14,15,16],inclusive:!1},open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,17,18,19,20,21,22,23,26,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50],inclusive:!0}}};function D(){this.yy={}}return M.lexer=O,D.prototype=M,M.Parser=D,new D}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e){var n,r,i=t.exports={};function a(){throw new Error("setTimeout has not been defined")}function o(){throw new 
Error("clearTimeout has not been defined")}function s(t){if(n===setTimeout)return setTimeout(t,0);if((n===a||!n)&&setTimeout)return n=setTimeout,setTimeout(t,0);try{return n(t,0)}catch(e){try{return n.call(null,t,0)}catch(e){return n.call(this,t,0)}}}!function(){try{n="function"==typeof setTimeout?setTimeout:a}catch(t){n=a}try{r="function"==typeof clearTimeout?clearTimeout:o}catch(t){r=o}}();var c,u=[],l=!1,h=-1;function f(){l&&c&&(l=!1,c.length?u=c.concat(u):h=-1,u.length&&d())}function d(){if(!l){var t=s(f);l=!0;for(var e=u.length;e;){for(c=u,u=[];++h1)for(var n=1;n=0;r--){var i=t[r];"."===i?t.splice(r,1):".."===i?(t.splice(r,1),n++):n&&(t.splice(r,1),n--)}if(e)for(;n--;n)t.unshift("..");return t}function r(t,e){if(t.filter)return t.filter(e);for(var n=[],r=0;r=-1&&!i;a--){var o=a>=0?arguments[a]:t.cwd();if("string"!=typeof o)throw new TypeError("Arguments to path.resolve must be strings");o&&(e=o+"/"+e,i="/"===o.charAt(0))}return(i?"/":"")+(e=n(r(e.split("/"),(function(t){return!!t})),!i).join("/"))||"."},e.normalize=function(t){var a=e.isAbsolute(t),o="/"===i(t,-1);return(t=n(r(t.split("/"),(function(t){return!!t})),!a).join("/"))||a||(t="."),t&&o&&(t+="/"),(a?"/":"")+t},e.isAbsolute=function(t){return"/"===t.charAt(0)},e.join=function(){var t=Array.prototype.slice.call(arguments,0);return e.normalize(r(t,(function(t,e){if("string"!=typeof t)throw new TypeError("Arguments to path.join must be strings");return t})).join("/"))},e.relative=function(t,n){function r(t){for(var e=0;e=0&&""===t[n];n--);return e>n?[]:t.slice(e,n-e+1)}t=e.resolve(t).substr(1),n=e.resolve(n).substr(1);for(var i=r(t.split("/")),a=r(n.split("/")),o=Math.min(i.length,a.length),s=o,c=0;c=1;--a)if(47===(e=t.charCodeAt(a))){if(!i){r=a;break}}else i=!1;return-1===r?n?"/":".":n&&1===r?"/":t.slice(0,r)},e.basename=function(t,e){var n=function(t){"string"!=typeof t&&(t+="");var 
e,n=0,r=-1,i=!0;for(e=t.length-1;e>=0;--e)if(47===t.charCodeAt(e)){if(!i){n=e+1;break}}else-1===r&&(i=!1,r=e+1);return-1===r?"":t.slice(n,r)}(t);return e&&n.substr(-1*e.length)===e&&(n=n.substr(0,n.length-e.length)),n},e.extname=function(t){"string"!=typeof t&&(t+="");for(var e=-1,n=0,r=-1,i=!0,a=0,o=t.length-1;o>=0;--o){var s=t.charCodeAt(o);if(47!==s)-1===r&&(i=!1,r=o+1),46===s?-1===e?e=o:1!==a&&(a=1):-1!==e&&(a=-1);else if(!i){n=o+1;break}}return-1===e||-1===r||0===a||1===a&&e===r-1&&e===n+1?"":t.slice(e,r)};var i="b"==="ab".substr(-1)?function(t,e,n){return t.substr(e,n)}:function(t,e,n){return e<0&&(e=t.length+e),t.substr(e,n)}}).call(this,n(14))},function(t,e){t.exports=function(t){return null!=t&&"object"==typeof t}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[1,2],n=[1,3],r=[1,5],i=[1,7],a=[2,5],o=[1,15],s=[1,17],c=[1,19],u=[1,20],l=[1,21],h=[1,22],f=[1,28],d=[1,23],p=[1,24],g=[1,25],y=[1,26],v=[1,29],m=[1,32],b=[1,4,5,14,15,17,19,20,22,23,24,25,26,36,39],x=[1,4,5,12,13,14,15,17,19,20,22,23,24,25,26,36,39],_=[1,4,5,7,14,15,17,19,20,22,23,24,25,26,36,39],k=[4,5,14,15,17,19,20,22,23,24,25,26,36,39],w={trace:function(){},yy:{},symbols_:{error:2,start:3,SPACE:4,NL:5,directive:6,SD:7,document:8,line:9,statement:10,idStatement:11,DESCR:12,"--\x3e":13,HIDE_EMPTY:14,scale:15,WIDTH:16,COMPOSIT_STATE:17,STRUCT_START:18,STRUCT_STOP:19,STATE_DESCR:20,AS:21,ID:22,FORK:23,JOIN:24,CONCURRENT:25,note:26,notePosition:27,NOTE_TEXT:28,openDirective:29,typeDirective:30,closeDirective:31,":":32,argDirective:33,eol:34,";":35,EDGE_STATE:36,left_of:37,right_of:38,open_directive:39,type_directive:40,arg_directive:41,close_directive:42,$accept:0,$end:1},terminals_:{2:"error",4:"SPACE",5:"NL",7:"SD",12:"DESCR",13:"--\x3e",14:"HIDE_EMPTY",15:"scale",16:"WIDTH",17:"COMPOSIT_STATE",18:"STRUCT_START",19:"STRUCT_STOP",20:"STATE_DESCR",21:"AS",22:"ID",23:"FORK",24:"JOIN",25:"CONCURRENT",26:"note",28:"NOTE_TEXT",3
2:":",35:";",36:"EDGE_STATE",37:"left_of",38:"right_of",39:"open_directive",40:"type_directive",41:"arg_directive",42:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[3,2],[8,0],[8,2],[9,2],[9,1],[9,1],[10,1],[10,2],[10,3],[10,4],[10,1],[10,2],[10,1],[10,4],[10,3],[10,6],[10,1],[10,1],[10,1],[10,4],[10,4],[10,1],[6,3],[6,5],[34,1],[34,1],[11,1],[11,1],[27,1],[27,1],[29,1],[30,1],[33,1],[31,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 4:return r.setRootDoc(a[s]),a[s];case 5:this.$=[];break;case 6:"nl"!=a[s]&&(a[s-1].push(a[s]),this.$=a[s-1]);break;case 7:case 8:this.$=a[s];break;case 9:this.$="nl";break;case 10:this.$={stmt:"state",id:a[s],type:"default",description:""};break;case 11:this.$={stmt:"state",id:a[s-1],type:"default",description:r.trimColon(a[s])};break;case 12:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-2],type:"default",description:""},state2:{stmt:"state",id:a[s],type:"default",description:""}};break;case 13:this.$={stmt:"relation",state1:{stmt:"state",id:a[s-3],type:"default",description:""},state2:{stmt:"state",id:a[s-1],type:"default",description:""},description:a[s].substr(1).trim()};break;case 17:this.$={stmt:"state",id:a[s-3],type:"default",description:"",doc:a[s-1]};break;case 18:var c=a[s],u=a[s-2].trim();if(a[s].match(":")){var l=a[s].split(":");c=l[0],u=[u,l[1]]}this.$={stmt:"state",id:c,type:"default",description:u};break;case 19:this.$={stmt:"state",id:a[s-3],type:"default",description:a[s-5],doc:a[s-1]};break;case 20:this.$={stmt:"state",id:a[s],type:"fork"};break;case 21:this.$={stmt:"state",id:a[s],type:"join"};break;case 22:this.$={stmt:"state",id:r.getDividerId(),type:"divider"};break;case 23:this.$={stmt:"state",id:a[s-1].trim(),note:{position:a[s-2].trim(),text:a[s].trim()}};break;case 30:case 31:this.$=a[s];break;case 34:r.parseDirective("%%{","open_directive");break;case 35:r.parseDirective(a[s],"type_directive");break;case 
36:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 37:r.parseDirective("}%%","close_directive","state")}},table:[{3:1,4:e,5:n,6:4,7:r,29:6,39:i},{1:[3]},{3:8,4:e,5:n,6:4,7:r,29:6,39:i},{3:9,4:e,5:n,6:4,7:r,29:6,39:i},{3:10,4:e,5:n,6:4,7:r,29:6,39:i},t([1,4,5,14,15,17,20,22,23,24,25,26,36,39],a,{8:11}),{30:12,40:[1,13]},{40:[2,34]},{1:[2,1]},{1:[2,2]},{1:[2,3]},{1:[2,4],4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},{31:30,32:[1,31],42:m},t([32,42],[2,35]),t(b,[2,6]),{6:27,10:33,11:18,14:c,15:u,17:l,20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,8]),t(b,[2,9]),t(b,[2,10],{12:[1,34],13:[1,35]}),t(b,[2,14]),{16:[1,36]},t(b,[2,16],{18:[1,37]}),{21:[1,38]},t(b,[2,20]),t(b,[2,21]),t(b,[2,22]),{27:39,28:[1,40],37:[1,41],38:[1,42]},t(b,[2,25]),t(x,[2,30]),t(x,[2,31]),t(_,[2,26]),{33:43,41:[1,44]},t(_,[2,37]),t(b,[2,7]),t(b,[2,11]),{11:45,22:f,36:v},t(b,[2,15]),t(k,a,{8:46}),{22:[1,47]},{22:[1,48]},{21:[1,49]},{22:[2,32]},{22:[2,33]},{31:50,42:m},{42:[2,36]},t(b,[2,12],{12:[1,51]}),{4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,52],20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,18],{18:[1,53]}),{28:[1,54]},{22:[1,55]},t(_,[2,27]),t(b,[2,13]),t(b,[2,17]),t(k,a,{8:56}),t(b,[2,23]),t(b,[2,24]),{4:o,5:s,6:27,9:14,10:16,11:18,14:c,15:u,17:l,19:[1,57],20:h,22:f,23:d,24:p,25:g,26:y,29:6,36:v,39:i},t(b,[2,19])],defaultActions:{7:[2,34],8:[2,1],9:[2,2],10:[2,3],41:[2,32],42:[2,33],44:[2,36]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var 
t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},E={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return 
this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),39;case 1:return this.begin("type_directive"),40;case 2:return this.popState(),this.begin("arg_directive"),32;case 3:return this.popState(),this.popState(),42;case 4:return 41;case 5:break;case 6:console.log("Crap after close");break;case 7:return 5;case 8:case 9:case 10:case 11:break;case 12:return this.pushState("SCALE"),15;case 13:return 16;case 14:this.popState();break;case 15:this.pushState("STATE");break;case 16:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 17:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 18:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),23;case 19:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),24;case 20:this.begin("STATE_STRING");break;case 
21:return this.popState(),this.pushState("STATE_ID"),"AS";case 22:return this.popState(),"ID";case 23:this.popState();break;case 24:return"STATE_DESCR";case 25:return 17;case 26:this.popState();break;case 27:return this.popState(),this.pushState("struct"),18;case 28:return this.popState(),19;case 29:break;case 30:return this.begin("NOTE"),26;case 31:return this.popState(),this.pushState("NOTE_ID"),37;case 32:return this.popState(),this.pushState("NOTE_ID"),38;case 33:this.popState(),this.pushState("FLOATING_NOTE");break;case 34:return this.popState(),this.pushState("FLOATING_NOTE_ID"),"AS";case 35:break;case 36:return"NOTE_TEXT";case 37:return this.popState(),"ID";case 38:return this.popState(),this.pushState("NOTE_TEXT"),22;case 39:return this.popState(),e.yytext=e.yytext.substr(2).trim(),28;case 40:return this.popState(),e.yytext=e.yytext.slice(0,-8).trim(),28;case 41:case 42:return 7;case 43:return 14;case 44:return 36;case 45:return 22;case 46:return e.yytext=e.yytext.trim(),12;case 47:return 13;case 48:return 25;case 49:return 5;case 50:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:[\s]+)/i,/^(?:((?!\n)\s)+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:scale\s+)/i,/^(?:\d+)/i,/^(?:\s+width\b)/i,/^(?:state\s+)/i,/^(?:.*<>)/i,/^(?:.*<>)/i,/^(?:.*\[\[fork\]\])/i,/^(?:.*\[\[join\]\])/i,/^(?:["])/i,/^(?:\s*as\s+)/i,/^(?:[^\n\{]*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n\s\{]+)/i,/^(?:\n)/i,/^(?:\{)/i,/^(?:\})/i,/^(?:[\n])/i,/^(?:note\s+)/i,/^(?:left of\b)/i,/^(?:right of\b)/i,/^(?:")/i,/^(?:\s*as\s*)/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[^\n]*)/i,/^(?:\s*[^:\n\s\-]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:\s*[^:;]+end note\b)/i,/^(?:stateDiagram\s+)/i,/^(?:stateDiagram-v2\s+)/i,/^(?:hide empty 
description\b)/i,/^(?:\[\*\])/i,/^(?:[^:\n\s\-\{]+)/i,/^(?:\s*:[^:\n;]+)/i,/^(?:-->)/i,/^(?:--)/i,/^(?:$)/i,/^(?:.)/i],conditions:{LINE:{rules:[9,10],inclusive:!1},close_directive:{rules:[9,10],inclusive:!1},arg_directive:{rules:[3,4,9,10],inclusive:!1},type_directive:{rules:[2,3,9,10],inclusive:!1},open_directive:{rules:[1,9,10],inclusive:!1},struct:{rules:[9,10,15,28,29,30,44,45,46,47,48],inclusive:!1},FLOATING_NOTE_ID:{rules:[37],inclusive:!1},FLOATING_NOTE:{rules:[34,35,36],inclusive:!1},NOTE_TEXT:{rules:[39,40],inclusive:!1},NOTE_ID:{rules:[38],inclusive:!1},NOTE:{rules:[31,32,33],inclusive:!1},SCALE:{rules:[13,14],inclusive:!1},ALIAS:{rules:[],inclusive:!1},STATE_ID:{rules:[22],inclusive:!1},STATE_STRING:{rules:[23,24],inclusive:!1},FORK_STATE:{rules:[],inclusive:!1},STATE:{rules:[9,10,16,17,18,19,20,21,25,26,27],inclusive:!1},ID:{rules:[9,10],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,10,11,12,15,27,30,41,42,43,44,45,46,47,49,50],inclusive:!0}}};function T(){this.yy={}}return w.lexer=E,T.prototype=w,w.Parser=T,new T}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t){t.exports=function(){"use strict";var e,r;function i(){return e.apply(null,arguments)}function a(t){return t instanceof Array||"[object Array]"===Object.prototype.toString.call(t)}function o(t){return null!=t&&"[object Object]"===Object.prototype.toString.call(t)}function s(t){return void 0===t}function c(t){return"number"==typeof t||"[object Number]"===Object.prototype.toString.call(t)}function u(t){return t instanceof Date||"[object Date]"===Object.prototype.toString.call(t)}function l(t,e){var n,r=[];for(n=0;n>>0,r=0;ryt(t)?(a=t+1,s-yt(t)):(a=t,s),{year:a,dayOfYear:o}}function Ft(t,e,n){var 
r,i,a=Bt(t.year(),e,n),o=Math.floor((t.dayOfYear()-a-1)/7)+1;return o<1?r=o+Pt(i=t.year()-1,e,n):o>Pt(t.year(),e,n)?(r=o-Pt(t.year(),e,n),i=t.year()+1):(i=t.year(),r=o),{week:r,year:i}}function Pt(t,e,n){var r=Bt(t,e,n),i=Bt(t+1,e,n);return(yt(t)-r+i)/7}function It(t,e){return t.slice(e,7).concat(t.slice(0,e))}W("w",["ww",2],"wo","week"),W("W",["WW",2],"Wo","isoWeek"),L("week","w"),L("isoWeek","W"),j("week",5),j("isoWeek",5),lt("w",Q),lt("ww",Q,q),lt("W",Q),lt("WW",Q,q),gt(["w","ww","W","WW"],(function(t,e,n,r){e[r.substr(0,1)]=w(t)})),W("d",0,"do","day"),W("dd",0,0,(function(t){return this.localeData().weekdaysMin(this,t)})),W("ddd",0,0,(function(t){return this.localeData().weekdaysShort(this,t)})),W("dddd",0,0,(function(t){return this.localeData().weekdays(this,t)})),W("e",0,0,"weekday"),W("E",0,0,"isoWeekday"),L("day","d"),L("weekday","e"),L("isoWeekday","E"),j("day",11),j("weekday",11),j("isoWeekday",11),lt("d",Q),lt("e",Q),lt("E",Q),lt("dd",(function(t,e){return e.weekdaysMinRegex(t)})),lt("ddd",(function(t,e){return e.weekdaysShortRegex(t)})),lt("dddd",(function(t,e){return e.weekdaysRegex(t)})),gt(["dd","ddd","dddd"],(function(t,e,n,r){var i=n._locale.weekdaysParse(t,r,n._strict);null!=i?e.d=i:p(n).invalidWeekday=t})),gt(["d","e","E"],(function(t,e,n,r){e[r]=w(t)}));var jt="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),Rt="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),Yt="Su_Mo_Tu_We_Th_Fr_Sa".split("_"),zt=ct,Ut=ct,$t=ct;function Wt(){function t(t,e){return e.length-t.length}var e,n,r,i,a,o=[],s=[],c=[],u=[];for(e=0;e<7;e++)n=d([2e3,1]).day(e),r=this.weekdaysMin(n,""),i=this.weekdaysShort(n,""),a=this.weekdays(n,""),o.push(r),s.push(i),c.push(a),u.push(r),u.push(i),u.push(a);for(o.sort(t),s.sort(t),c.sort(t),u.sort(t),e=0;e<7;e++)s[e]=ft(s[e]),c[e]=ft(c[e]),u[e]=ft(u[e]);this._weekdaysRegex=new 
RegExp("^("+u.join("|")+")","i"),this._weekdaysShortRegex=this._weekdaysRegex,this._weekdaysMinRegex=this._weekdaysRegex,this._weekdaysStrictRegex=new RegExp("^("+c.join("|")+")","i"),this._weekdaysShortStrictRegex=new RegExp("^("+s.join("|")+")","i"),this._weekdaysMinStrictRegex=new RegExp("^("+o.join("|")+")","i")}function Vt(){return this.hours()%12||12}function Ht(t,e){W(t,0,0,(function(){return this.localeData().meridiem(this.hours(),this.minutes(),e)}))}function Gt(t,e){return e._meridiemParse}W("H",["HH",2],0,"hour"),W("h",["hh",2],0,Vt),W("k",["kk",2],0,(function(){return this.hours()||24})),W("hmm",0,0,(function(){return""+Vt.apply(this)+R(this.minutes(),2)})),W("hmmss",0,0,(function(){return""+Vt.apply(this)+R(this.minutes(),2)+R(this.seconds(),2)})),W("Hmm",0,0,(function(){return""+this.hours()+R(this.minutes(),2)})),W("Hmmss",0,0,(function(){return""+this.hours()+R(this.minutes(),2)+R(this.seconds(),2)})),Ht("a",!0),Ht("A",!1),L("hour","h"),j("hour",13),lt("a",Gt),lt("A",Gt),lt("H",Q),lt("h",Q),lt("k",Q),lt("HH",Q,q),lt("hh",Q,q),lt("kk",Q,q),lt("hmm",K),lt("hmmss",tt),lt("Hmm",K),lt("Hmmss",tt),pt(["H","HH"],3),pt(["k","kk"],(function(t,e,n){var r=w(t);e[3]=24===r?0:r})),pt(["a","A"],(function(t,e,n){n._isPm=n._locale.isPM(t),n._meridiem=t})),pt(["h","hh"],(function(t,e,n){e[3]=w(t),p(n).bigHour=!0})),pt("hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r)),p(n).bigHour=!0})),pt("hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i)),p(n).bigHour=!0})),pt("Hmm",(function(t,e,n){var r=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r))})),pt("Hmmss",(function(t,e,n){var r=t.length-4,i=t.length-2;e[3]=w(t.substr(0,r)),e[4]=w(t.substr(r,2)),e[5]=w(t.substr(i))}));var qt,Xt=xt("Hours",!0),Zt={calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] 
LT",sameElse:"L"},longDateFormat:{LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},invalidDate:"Invalid date",ordinal:"%d",dayOfMonthOrdinalParse:/\d{1,2}/,relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",ss:"%d seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},months:Tt,monthsShort:Ct,week:{dow:0,doy:6},weekdays:jt,weekdaysMin:Yt,weekdaysShort:Rt,meridiemParse:/[ap]\.?m?\.?/i},Jt={},Qt={};function Kt(t){return t?t.toLowerCase().replace("_","-"):t}function te(e){var r=null;if(!Jt[e]&&void 0!==t&&t&&t.exports)try{r=qt._abbr,n(171)("./"+e),ee(r)}catch(e){}return Jt[e]}function ee(t,e){var n;return t&&((n=s(e)?re(t):ne(t,e))?qt=n:"undefined"!=typeof console&&console.warn&&console.warn("Locale "+t+" not found. Did you forget to load it?")),qt._abbr}function ne(t,e){if(null===e)return delete Jt[t],null;var n,r=Zt;if(e.abbr=t,null!=Jt[t])M("defineLocaleOverride","use moment.updateLocale(localeName, config) to change an existing locale. 
moment.defineLocale(localeName, config) should only be used for creating a new locale See http://momentjs.com/guides/#/warnings/define-locale/ for more info."),r=Jt[t]._config;else if(null!=e.parentLocale)if(null!=Jt[e.parentLocale])r=Jt[e.parentLocale]._config;else{if(null==(n=te(e.parentLocale)))return Qt[e.parentLocale]||(Qt[e.parentLocale]=[]),Qt[e.parentLocale].push({name:t,config:e}),null;r=n._config}return Jt[t]=new N(D(r,e)),Qt[t]&&Qt[t].forEach((function(t){ne(t.name,t.config)})),ee(t),Jt[t]}function re(t){var e;if(t&&t._locale&&t._locale._abbr&&(t=t._locale._abbr),!t)return qt;if(!a(t)){if(e=te(t))return e;t=[t]}return function(t){for(var e,n,r,i,a=0;a=e&&E(i,n,!0)>=e-1)break;e--}a++}return qt}(t)}function ie(t){var e,n=t._a;return n&&-2===p(t).overflow&&(e=n[1]<0||11wt(n[0],n[1])?2:n[3]<0||24Pt(n,a,o)?p(t)._overflowWeeks=!0:null!=c?p(t)._overflowWeekday=!0:(s=Lt(n,r,i,a,o),t._a[0]=s.year,t._dayOfYear=s.dayOfYear)}(t),null!=t._dayOfYear&&(o=ae(t._a[0],r[0]),(t._dayOfYear>yt(o)||0===t._dayOfYear)&&(p(t)._overflowDayOfYear=!0),n=Nt(o,0,t._dayOfYear),t._a[1]=n.getUTCMonth(),t._a[2]=n.getUTCDate()),e=0;e<3&&null==t._a[e];++e)t._a[e]=s[e]=r[e];for(;e<7;e++)t._a[e]=s[e]=null==t._a[e]?2===e?1:0:t._a[e];24===t._a[3]&&0===t._a[4]&&0===t._a[5]&&0===t._a[6]&&(t._nextDay=!0,t._a[3]=0),t._d=(t._useUTC?Nt:function(t,e,n,r,i,a,o){var s;return t<100&&0<=t?(s=new Date(t+400,e,n,r,i,a,o),isFinite(s.getFullYear())&&s.setFullYear(t)):s=new Date(t,e,n,r,i,a,o),s}).apply(null,s),a=t._useUTC?t._d.getUTCDay():t._d.getDay(),null!=t._tzm&&t._d.setUTCMinutes(t._d.getUTCMinutes()-t._tzm),t._nextDay&&(t._a[3]=24),t._w&&void 0!==t._w.d&&t._w.d!==a&&(p(t).weekdayMismatch=!0)}}var se=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ce=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/,ue=/Z|[+-]\d\d(?::?\d\d)?/,le=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],he=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],fe=/^\/?Date\((\-?\d+)/i;function de(t){var e,n,r,i,a,o,s=t._i,c=se.exec(s)||ce.exec(s);if(c){for(p(t).iso=!0,e=0,n=le.length;en.valueOf():n.valueOf()this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()},on.isLocal=function(){return!!this.isValid()&&!this._isUTC},on.isUtcOffset=function(){return!!this.isValid()&&this._isUTC},on.isUtc=Be,on.isUTC=Be,on.zoneAbbr=function(){return this._isUTC?"UTC":""},on.zoneName=function(){return this._isUTC?"Coordinated Universal Time":""},on.dates=C("dates accessor is deprecated. Use date instead.",Ke),on.months=C("months accessor is deprecated. Use month instead",At),on.years=C("years accessor is deprecated. Use year instead",bt),on.zone=C("moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/",(function(t,e){return null!=t?("string"!=typeof t&&(t=-t),this.utcOffset(t,e),this):-this.utcOffset()})),on.isDSTShifted=C("isDSTShifted is deprecated. 
See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information",(function(){if(!s(this._isDSTShifted))return this._isDSTShifted;var t={};if(m(t,this),(t=me(t))._a){var e=t._isUTC?d(t._a):xe(t._a);this._isDSTShifted=this.isValid()&&0h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},qt={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return 
this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),12;case 1:return this.begin("type_directive"),13;case 2:return this.popState(),this.begin("arg_directive"),10;case 3:return this.popState(),this.popState(),15;case 4:return 14;case 5:case 6:break;case 7:this.begin("string");break;case 8:this.popState();break;case 9:return"STR";case 10:return 75;case 11:return 84;case 12:return 76;case 13:return 90;case 14:return 77;case 15:return 78;case 16:return 79;case 17:case 18:return t.lex.firstGraph()&&this.begin("dir"),24;case 19:return 38;case 20:return 42;case 21:case 22:case 23:case 24:return 87;case 25:return this.popState(),25;case 26:case 27:case 28:case 29:case 30:case 31:case 32:case 33:case 34:case 35:return this.popState(),26;case 36:return 91;case 37:return 99;case 38:return 47;case 39:return 96;case 
40:return 46;case 41:return 20;case 42:return 92;case 43:return 110;case 44:case 45:case 46:return 70;case 47:case 48:case 49:return 69;case 50:return 51;case 51:return 52;case 52:return 53;case 53:return 54;case 54:return 55;case 55:return 56;case 56:return 57;case 57:return 58;case 58:return 97;case 59:return 100;case 60:return 111;case 61:return 108;case 62:return 101;case 63:case 64:return 109;case 65:return 102;case 66:return 61;case 67:return 81;case 68:return"SEP";case 69:return 80;case 70:return 95;case 71:return 63;case 72:return 62;case 73:return 65;case 74:return 64;case 75:return 106;case 76:return 107;case 77:return 71;case 78:return 49;case 79:return 50;case 80:return 40;case 81:return 41;case 82:return 59;case 83:return 60;case 84:return 117;case 85:return 21;case 86:return 22;case 87:return 23}},rules:[/^(?:%%\{)/,/^(?:((?:(?!\}%%)[^:.])*))/,/^(?::)/,/^(?:\}%%)/,/^(?:((?:(?!\}%%).|\n)*))/,/^(?:%%(?!\{)[^\n]*)/,/^(?:[^\}]%%[^\n]*)/,/^(?:["])/,/^(?:["])/,/^(?:[^"]*)/,/^(?:style\b)/,/^(?:default\b)/,/^(?:linkStyle\b)/,/^(?:interpolate\b)/,/^(?:classDef\b)/,/^(?:class\b)/,/^(?:click\b)/,/^(?:graph\b)/,/^(?:flowchart\b)/,/^(?:subgraph\b)/,/^(?:end\b\s*)/,/^(?:_self\b)/,/^(?:_blank\b)/,/^(?:_parent\b)/,/^(?:_top\b)/,/^(?:(\r?\n)*\s*\n)/,/^(?:\s*LR\b)/,/^(?:\s*RL\b)/,/^(?:\s*TB\b)/,/^(?:\s*BT\b)/,/^(?:\s*TD\b)/,/^(?:\s*BR\b)/,/^(?:\s*<)/,/^(?:\s*>)/,/^(?:\s*\^)/,/^(?:\s*v\b)/,/^(?:[0-9]+)/,/^(?:#)/,/^(?::::)/,/^(?::)/,/^(?:&)/,/^(?:;)/,/^(?:,)/,/^(?:\*)/,/^(?:\s*[xo<]?--+[-xo>]\s*)/,/^(?:\s*[xo<]?==+[=xo>]\s*)/,/^(?:\s*[xo<]?-?\.+-[xo>]?\s*)/,/^(?:\s*[xo<]?--\s*)/,/^(?:\s*[xo<]?==\s*)/,/^(?:\s*[xo<]?-\.\s*)/,/^(?:\(-)/,/^(?:-\))/,/^(?:\(\[)/,/^(?:\]\))/,/^(?:\[\[)/,/^(?:\]\])/,/^(?:\[\()/,/^(?:\)\])/,/^(?:-)/,/^(?:\.)/,/^(?:[\_])/,/^(?:\+)/,/^(?:%)/,/^(?:=)/,/^(?:=)/,/^(?:<)/,/^(?:>)/,/^(?:\^)/,/^(?:\\\|)/,/^(?:v\b)/,/^(?:[A-Za-z]+)/,/^(?:\\\])/,/^(?:\[\/)/,/^(?:\/\])/,/^(?:\[\\)/,/^(?:[!"#$%&'*+,-.`?\\_/])/,/^(?:[\u00AA\u00B5\u00BA\u00C0-\u00D6\u00D8-\u00F
6]|[\u00F8-\u02C1\u02C6-\u02D1\u02E0-\u02E4\u02EC\u02EE\u0370-\u0374\u0376\u0377]|[\u037A-\u037D\u0386\u0388-\u038A\u038C\u038E-\u03A1\u03A3-\u03F5]|[\u03F7-\u0481\u048A-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05D0-\u05EA]|[\u05F0-\u05F2\u0620-\u064A\u066E\u066F\u0671-\u06D3\u06D5\u06E5\u06E6\u06EE]|[\u06EF\u06FA-\u06FC\u06FF\u0710\u0712-\u072F\u074D-\u07A5\u07B1\u07CA-\u07EA]|[\u07F4\u07F5\u07FA\u0800-\u0815\u081A\u0824\u0828\u0840-\u0858\u08A0]|[\u08A2-\u08AC\u0904-\u0939\u093D\u0950\u0958-\u0961\u0971-\u0977]|[\u0979-\u097F\u0985-\u098C\u098F\u0990\u0993-\u09A8\u09AA-\u09B0\u09B2]|[\u09B6-\u09B9\u09BD\u09CE\u09DC\u09DD\u09DF-\u09E1\u09F0\u09F1\u0A05-\u0A0A]|[\u0A0F\u0A10\u0A13-\u0A28\u0A2A-\u0A30\u0A32\u0A33\u0A35\u0A36\u0A38\u0A39]|[\u0A59-\u0A5C\u0A5E\u0A72-\u0A74\u0A85-\u0A8D\u0A8F-\u0A91\u0A93-\u0AA8]|[\u0AAA-\u0AB0\u0AB2\u0AB3\u0AB5-\u0AB9\u0ABD\u0AD0\u0AE0\u0AE1\u0B05-\u0B0C]|[\u0B0F\u0B10\u0B13-\u0B28\u0B2A-\u0B30\u0B32\u0B33\u0B35-\u0B39\u0B3D\u0B5C]|[\u0B5D\u0B5F-\u0B61\u0B71\u0B83\u0B85-\u0B8A\u0B8E-\u0B90\u0B92-\u0B95\u0B99]|[\u0B9A\u0B9C\u0B9E\u0B9F\u0BA3\u0BA4\u0BA8-\u0BAA\u0BAE-\u0BB9\u0BD0]|[\u0C05-\u0C0C\u0C0E-\u0C10\u0C12-\u0C28\u0C2A-\u0C33\u0C35-\u0C39\u0C3D]|[\u0C58\u0C59\u0C60\u0C61\u0C85-\u0C8C\u0C8E-\u0C90\u0C92-\u0CA8\u0CAA-\u0CB3]|[\u0CB5-\u0CB9\u0CBD\u0CDE\u0CE0\u0CE1\u0CF1\u0CF2\u0D05-\u0D0C\u0D0E-\u0D10]|[\u0D12-\u0D3A\u0D3D\u0D4E\u0D60\u0D61\u0D7A-\u0D7F\u0D85-\u0D96\u0D9A-\u0DB1]|[\u0DB3-\u0DBB\u0DBD\u0DC0-\u0DC6\u0E01-\u0E30\u0E32\u0E33\u0E40-\u0E46\u0E81]|[\u0E82\u0E84\u0E87\u0E88\u0E8A\u0E8D\u0E94-\u0E97\u0E99-\u0E9F\u0EA1-\u0EA3]|[\u0EA5\u0EA7\u0EAA\u0EAB\u0EAD-\u0EB0\u0EB2\u0EB3\u0EBD\u0EC0-\u0EC4\u0EC6]|[\u0EDC-\u0EDF\u0F00\u0F40-\u0F47\u0F49-\u0F6C\u0F88-\u0F8C\u1000-\u102A]|[\u103F\u1050-\u1055\u105A-\u105D\u1061\u1065\u1066\u106E-\u1070\u1075-\u1081]|[\u108E\u10A0-\u10C5\u10C7\u10CD\u10D0-\u10FA\u10FC-\u1248\u124A-\u124D]|[\u1250-\u1256\u1258\u125A-\u125D\u1260-\u1288\u128A-\u128D\u1290-\u12B0]|[\u12B2-\u12B5\u12B8-\u12BE\u1
2C0\u12C2-\u12C5\u12C8-\u12D6\u12D8-\u1310]|[\u1312-\u1315\u1318-\u135A\u1380-\u138F\u13A0-\u13F4\u1401-\u166C]|[\u166F-\u167F\u1681-\u169A\u16A0-\u16EA\u1700-\u170C\u170E-\u1711]|[\u1720-\u1731\u1740-\u1751\u1760-\u176C\u176E-\u1770\u1780-\u17B3\u17D7]|[\u17DC\u1820-\u1877\u1880-\u18A8\u18AA\u18B0-\u18F5\u1900-\u191C]|[\u1950-\u196D\u1970-\u1974\u1980-\u19AB\u19C1-\u19C7\u1A00-\u1A16]|[\u1A20-\u1A54\u1AA7\u1B05-\u1B33\u1B45-\u1B4B\u1B83-\u1BA0\u1BAE\u1BAF]|[\u1BBA-\u1BE5\u1C00-\u1C23\u1C4D-\u1C4F\u1C5A-\u1C7D\u1CE9-\u1CEC]|[\u1CEE-\u1CF1\u1CF5\u1CF6\u1D00-\u1DBF\u1E00-\u1F15\u1F18-\u1F1D]|[\u1F20-\u1F45\u1F48-\u1F4D\u1F50-\u1F57\u1F59\u1F5B\u1F5D\u1F5F-\u1F7D]|[\u1F80-\u1FB4\u1FB6-\u1FBC\u1FBE\u1FC2-\u1FC4\u1FC6-\u1FCC\u1FD0-\u1FD3]|[\u1FD6-\u1FDB\u1FE0-\u1FEC\u1FF2-\u1FF4\u1FF6-\u1FFC\u2071\u207F]|[\u2090-\u209C\u2102\u2107\u210A-\u2113\u2115\u2119-\u211D\u2124\u2126\u2128]|[\u212A-\u212D\u212F-\u2139\u213C-\u213F\u2145-\u2149\u214E\u2183\u2184]|[\u2C00-\u2C2E\u2C30-\u2C5E\u2C60-\u2CE4\u2CEB-\u2CEE\u2CF2\u2CF3]|[\u2D00-\u2D25\u2D27\u2D2D\u2D30-\u2D67\u2D6F\u2D80-\u2D96\u2DA0-\u2DA6]|[\u2DA8-\u2DAE\u2DB0-\u2DB6\u2DB8-\u2DBE\u2DC0-\u2DC6\u2DC8-\u2DCE]|[\u2DD0-\u2DD6\u2DD8-\u2DDE\u2E2F\u3005\u3006\u3031-\u3035\u303B\u303C]|[\u3041-\u3096\u309D-\u309F\u30A1-\u30FA\u30FC-\u30FF\u3105-\u312D]|[\u3131-\u318E\u31A0-\u31BA\u31F0-\u31FF\u3400-\u4DB5\u4E00-\u9FCC]|[\uA000-\uA48C\uA4D0-\uA4FD\uA500-\uA60C\uA610-\uA61F\uA62A\uA62B]|[\uA640-\uA66E\uA67F-\uA697\uA6A0-\uA6E5\uA717-\uA71F\uA722-\uA788]|[\uA78B-\uA78E\uA790-\uA793\uA7A0-\uA7AA\uA7F8-\uA801\uA803-\uA805]|[\uA807-\uA80A\uA80C-\uA822\uA840-\uA873\uA882-\uA8B3\uA8F2-\uA8F7\uA8FB]|[\uA90A-\uA925\uA930-\uA946\uA960-\uA97C\uA984-\uA9B2\uA9CF\uAA00-\uAA28]|[\uAA40-\uAA42\uAA44-\uAA4B\uAA60-\uAA76\uAA7A\uAA80-\uAAAF\uAAB1\uAAB5]|[\uAAB6\uAAB9-\uAABD\uAAC0\uAAC2\uAADB-\uAADD\uAAE0-\uAAEA\uAAF2-\uAAF4]|[\uAB01-\uAB06\uAB09-\uAB0E\uAB11-\uAB16\uAB20-\uAB26\uAB28-\uAB2E]|[\uABC0-\uABE2\uAC00-\uD7A3\uD7B0-\uD7C6\uD7CB-\uD7FB\uF9
00-\uFA6D]|[\uFA70-\uFAD9\uFB00-\uFB06\uFB13-\uFB17\uFB1D\uFB1F-\uFB28\uFB2A-\uFB36]|[\uFB38-\uFB3C\uFB3E\uFB40\uFB41\uFB43\uFB44\uFB46-\uFBB1\uFBD3-\uFD3D]|[\uFD50-\uFD8F\uFD92-\uFDC7\uFDF0-\uFDFB\uFE70-\uFE74\uFE76-\uFEFC]|[\uFF21-\uFF3A\uFF41-\uFF5A\uFF66-\uFFBE\uFFC2-\uFFC7\uFFCA-\uFFCF]|[\uFFD2-\uFFD7\uFFDA-\uFFDC])/,/^(?:\|)/,/^(?:\()/,/^(?:\))/,/^(?:\[)/,/^(?:\])/,/^(?:\{)/,/^(?:\})/,/^(?:")/,/^(?:(\r?\n)+)/,/^(?:\s)/,/^(?:$)/],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},vertex:{rules:[],inclusive:!1},dir:{rules:[25,26,27,28,29,30,31,32,33,34,35],inclusive:!1},string:{rules:[8,9],inclusive:!1},INITIAL:{rules:[0,5,6,7,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87],inclusive:!0}}};function Xt(){this.yy={}}return Gt.lexer=qt,Xt.prototype=Gt,Gt.Parser=Xt,new Xt}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,3],n=[1,5],r=[7,9,11,12,13,14,15,16,17,18,20,27,32],i=[1,15],a=[1,16],o=[1,17],s=[1,18],c=[1,19],u=[1,20],l=[1,21],h=[1,23],f=[1,25],d=[1,28],p=[5,7,9,11,12,13,14,15,16,17,18,20,27,32],g={trace:function(){},yy:{},symbols_:{error:2,start:3,directive:4,gantt:5,document:6,EOF:7,line:8,SPACE:9,statement:10,NL:11,dateFormat:12,inclusiveEndDates:13,axisFormat:14,excludes:15,todayMarker:16,title:17,section:18,clickStatement:19,taskTxt:20,taskData:21,openDirective:22,typeDirective:23,closeDirective:24,":":25,argDirective:26,click:27,callbackname:28,callbackargs:29,href:30,clickStatementDebug:31,open_directive:32,type_directive:33,arg_directive:34,close_directive:35,$accept:0,$end:1},terminals_:{2:"error",5:"gantt",7:"EOF",9:"SPACE",11:"NL",12:"dateFormat",13:"inclusiveEndDates",14:"axisFormat",15:"excludes",16:"todayMarker",17:"title",18:"section",20:"taskTxt",21:"taskData",25:":",27:"click",28:"callbackname",29:"callbackargs",30:"href",32:"open_directive",33:"type_directive",34:"arg_directive",35:"close_directive"},productions_:[0,[3,2],[3,3],[6,0],[6,2],[8,2],[8,1],[8,1],[8,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,1],[10,2],[10,1],[4,4],[4,6],[19,2],[19,3],[19,3],[19,4],[19,3],[19,4],[19,2],[31,2],[31,3],[31,3],[31,4],[31,3],[31,4],[31,2],[22,1],[23,1],[26,1],[24,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 2:return a[s-1];case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 9:r.setDateFormat(a[s].substr(11)),this.$=a[s].substr(11);break;case 10:r.enableInclusiveEndDates(),this.$=a[s].substr(18);break;case 11:r.setAxisFormat(a[s].substr(11)),this.$=a[s].substr(11);break;case 12:r.setExcludes(a[s].substr(9)),this.$=a[s].substr(9);break;case 13:r.setTodayMarker(a[s].substr(12)),this.$=a[s].substr(12);break;case 14:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 15:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 
17:r.addTask(a[s-1],a[s]),this.$="task";break;case 21:this.$=a[s-1],r.setClickEvent(a[s-1],a[s],null);break;case 22:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],a[s]);break;case 23:this.$=a[s-2],r.setClickEvent(a[s-2],a[s-1],null),r.setLink(a[s-2],a[s]);break;case 24:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-2],a[s-1]),r.setLink(a[s-3],a[s]);break;case 25:this.$=a[s-2],r.setClickEvent(a[s-2],a[s],null),r.setLink(a[s-2],a[s-1]);break;case 26:this.$=a[s-3],r.setClickEvent(a[s-3],a[s-1],a[s]),r.setLink(a[s-3],a[s-2]);break;case 27:this.$=a[s-1],r.setLink(a[s-1],a[s]);break;case 28:case 34:this.$=a[s-1]+" "+a[s];break;case 29:case 30:case 32:this.$=a[s-2]+" "+a[s-1]+" "+a[s];break;case 31:case 33:this.$=a[s-3]+" "+a[s-2]+" "+a[s-1]+" "+a[s];break;case 35:r.parseDirective("%%{","open_directive");break;case 36:r.parseDirective(a[s],"type_directive");break;case 37:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 38:r.parseDirective("}%%","close_directive","gantt")}},table:[{3:1,4:2,5:e,22:4,32:n},{1:[3]},{3:6,4:2,5:e,22:4,32:n},t(r,[2,3],{6:7}),{23:8,33:[1,9]},{33:[2,35]},{1:[2,1]},{4:24,7:[1,10],8:11,9:[1,12],10:13,11:[1,14],12:i,13:a,14:o,15:s,16:c,17:u,18:l,19:22,20:h,22:4,27:f,32:n},{24:26,25:[1,27],35:d},t([25,35],[2,36]),t(r,[2,8],{1:[2,2]}),t(r,[2,4]),{4:24,10:29,12:i,13:a,14:o,15:s,16:c,17:u,18:l,19:22,20:h,22:4,27:f,32:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,9]),t(r,[2,10]),t(r,[2,11]),t(r,[2,12]),t(r,[2,13]),t(r,[2,14]),t(r,[2,15]),t(r,[2,16]),{21:[1,30]},t(r,[2,18]),{28:[1,31],30:[1,32]},{11:[1,33]},{26:34,34:[1,35]},{11:[2,38]},t(r,[2,5]),t(r,[2,17]),t(r,[2,21],{29:[1,36],30:[1,37]}),t(r,[2,27],{28:[1,38]}),t(p,[2,19]),{24:39,35:d},{35:[2,37]},t(r,[2,22],{30:[1,40]}),t(r,[2,23]),t(r,[2,25],{29:[1,41]}),{11:[1,42]},t(r,[2,24]),t(r,[2,26]),t(p,[2,20])],defaultActions:{5:[2,35],6:[2,1],28:[2,38],35:[2,37]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var 
e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return 
E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},y={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),32;case 1:return this.begin("type_directive"),33;case 2:return this.popState(),this.begin("arg_directive"),25;case 3:return this.popState(),this.popState(),35;case 4:return 34;case 5:case 6:case 7:break;case 8:return 11;case 9:case 10:case 11:break;case 12:this.begin("href");break;case 13:this.popState();break;case 14:return 30;case 15:this.begin("callbackname");break;case 16:this.popState();break;case 17:this.popState(),this.begin("callbackargs");break;case 18:return 28;case 19:this.popState();break;case 20:return 29;case 21:this.begin("click");break;case 22:this.popState();break;case 23:return 27;case 24:return 5;case 25:return 12;case 26:return 13;case 27:return 14;case 28:return 15;case 29:return 16;case 30:return"date";case 
31:return 17;case 32:return 18;case 33:return 20;case 34:return 21;case 35:return 25;case 36:return 7;case 37:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)*[^\n]*)/i,/^(?:[^\}]%%*[^\n]*)/i,/^(?:%%*[^\n]*[\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:href[\s]+["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:call[\s]+)/i,/^(?:\([\s]*\))/i,/^(?:\()/i,/^(?:[^(]*)/i,/^(?:\))/i,/^(?:[^)]*)/i,/^(?:click[\s]+)/i,/^(?:[\s\n])/i,/^(?:[^\s\n]*)/i,/^(?:gantt\b)/i,/^(?:dateFormat\s[^#\n;]+)/i,/^(?:inclusiveEndDates\b)/i,/^(?:axisFormat\s[^#\n;]+)/i,/^(?:excludes\s[^#\n;]+)/i,/^(?:todayMarker\s[^\n;]+)/i,/^(?:\d\d\d\d-\d\d-\d\d\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},callbackargs:{rules:[19,20],inclusive:!1},callbackname:{rules:[16,17,18],inclusive:!1},href:{rules:[13,14],inclusive:!1},click:{rules:[22,23],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,15,21,24,25,26,27,28,29,30,31,32,33,34,35,36,37],inclusive:!0}}};function v(){this.yy={}}return g.lexer=y,v.prototype=g,g.Parser=v,new v}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,17,18,19,21],i=[1,15],a=[1,16],o=[1,17],s=[1,21],c=[4,6,9,11,17,18,19,21],u={trace:function(){},yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,title:17,section:18,taskName:19,taskData:20,open_directive:21,type_directive:22,arg_directive:23,close_directive:24,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",17:"title",18:"section",19:"taskName",20:"taskData",21:"open_directive",22:"type_directive",23:"arg_directive",24:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,1],[10,2],[10,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 11:r.setTitle(a[s].substr(6)),this.$=a[s].substr(6);break;case 12:r.addSection(a[s].substr(8)),this.$=a[s].substr(8);break;case 13:r.addTask(a[s-1],a[s]),this.$="task";break;case 15:r.parseDirective("%%{","open_directive");break;case 16:r.parseDirective(a[s],"type_directive");break;case 17:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 
18:r.parseDirective("}%%","close_directive","journey")}},table:[{3:1,4:e,7:3,12:4,21:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,21:n},{13:8,22:[1,9]},{22:[2,15]},{6:[1,10],7:18,8:11,9:[1,12],10:13,11:[1,14],12:4,17:i,18:a,19:o,21:n},{1:[2,2]},{14:19,15:[1,20],24:s},t([15,24],[2,16]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:18,10:22,12:4,17:i,18:a,19:o,21:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,12]),{20:[1,23]},t(r,[2,14]),{11:[1,24]},{16:25,23:[1,26]},{11:[2,18]},t(r,[2,5]),t(r,[2,13]),t(c,[2,9]),{14:27,24:s},{24:[2,17]},{11:[1,28]},t(c,[2,10])],defaultActions:{5:[2,15],7:[2,2],21:[2,18],26:[2,17]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple 
actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},l={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return 
this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),21;case 1:return this.begin("type_directive"),22;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),24;case 4:return 23;case 5:case 6:break;case 7:return 11;case 8:case 9:break;case 10:return 4;case 11:return 17;case 12:return 18;case 13:return 19;case 14:return 20;case 15:return 15;case 16:return 6;case 17:return"INVALID"}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:journey\b)/i,/^(?:title\s[^#\n;]+)/i,/^(?:section\s[^#:\n;]+)/i,/^(?:[^#:\n;]+)/i,/^(?::[^#\n;]+)/i,/^(?::)/i,/^(?:$)/i,/^(?:.)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17],inclusive:!0}}};function h(){this.yy={}}return u.lexer=l,h.prototype=u,u.Parser=h,new h}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t,e){return r.default.lang.round(i.default.parse(t)[e])}},function(t,e,n){var r=n(112),i=n(82),a=n(24);t.exports=function(t){return a(t)?r(t):i(t)}},function(t,e,n){var r;if(!r)try{r=n(0)}catch(t){}r||(r=window.d3),t.exports=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t,e,n){var a=i.default.parse(t),o=a[e],s=r.default.channel.clamp[e](o+n);return o!==s&&(a[e]=s),i.default.stringify(a)}},function(t,e,n){var r=n(210),i=n(216);t.exports=function(t,e){var n=i(t,e);return r(n)?n:void 0}},function(t,e,n){var r=n(38),i=n(212),a=n(213),o=r?r.toStringTag:void 0;t.exports=function(t){return null==t?void 0===t?"[object Undefined]":"[object Null]":o&&o in Object(t)?i(t):a(t)}},function(t,e){t.exports=function(t){return t}},function(t,e){t.exports=function(t,e){return t===e||t!=t&&e!=e}},function(t,e,n){var r=n(34),i=n(11);t.exports=function(t){if(!i(t))return!1;var e=r(t);return"[object Function]"==e||"[object GeneratorFunction]"==e||"[object AsyncFunction]"==e||"[object Proxy]"==e}},function(t,e,n){var r=n(16).Symbol;t.exports=r},function(t,e,n){(function(t){var r=n(16),i=n(232),a=e&&!e.nodeType&&e,o=a&&"object"==typeof t&&t&&!t.nodeType&&t,s=o&&o.exports===a?r.Buffer:void 0,c=(s?s.isBuffer:void 0)||i;t.exports=c}).call(this,n(7)(t))},function(t,e,n){var r=n(112),i=n(236),a=n(24);t.exports=function(t){return a(t)?r(t,!0):i(t)}},function(t,e,n){var r=n(241),i=n(77),a=n(242),o=n(121),s=n(243),c=n(34),u=n(110),l=u(r),h=u(i),f=u(a),d=u(o),p=u(s),g=c;(r&&"[object DataView]"!=g(new r(new ArrayBuffer(1)))||i&&"[object Map]"!=g(new i)||a&&"[object Promise]"!=g(a.resolve())||o&&"[object Set]"!=g(new o)||s&&"[object WeakMap]"!=g(new s))&&(g=function(t){var 
e=c(t),n="[object Object]"==e?t.constructor:void 0,r=n?u(n):"";if(r)switch(r){case l:return"[object DataView]";case h:return"[object Map]";case f:return"[object Promise]";case d:return"[object Set]";case p:return"[object WeakMap]"}return e}),t.exports=g},function(t,e,n){var r=n(34),i=n(21);t.exports=function(t){return"symbol"==typeof t||i(t)&&"[object Symbol]"==r(t)}},function(t,e,n){var r;try{r={defaults:n(154),each:n(87),isFunction:n(37),isPlainObject:n(158),pick:n(161),has:n(93),range:n(162),uniqueId:n(163)}}catch(t){}r||(r=window._),t.exports=r},function(t){t.exports=JSON.parse('{"name":"mermaid","version":"8.8.3","description":"Markdownish syntax for generating flowcharts, sequence diagrams, class diagrams, gantt charts and git graphs.","main":"dist/mermaid.core.js","keywords":["diagram","markdown","flowchart","sequence diagram","gantt","class diagram","git graph"],"scripts":{"build:development":"webpack --progress --colors","build:production":"yarn build:development -p --config webpack.config.prod.babel.js","build":"yarn build:development && yarn build:production","postbuild":"documentation build src/mermaidAPI.js src/config.js --shallow -f md --markdown-toc false > docs/Setup.md","build:watch":"yarn build --watch","minify":"minify ./dist/mermaid.js > ./dist/mermaid.min.js","release":"yarn build","lint":"eslint src","e2e:depr":"yarn lint && jest e2e --config e2e/jest.config.js","cypress":"percy exec -- cypress run","e2e":"start-server-and-test dev http://localhost:9000/ cypress","e2e-upd":"yarn lint && jest e2e -u --config e2e/jest.config.js","dev":"webpack-dev-server --config webpack.config.e2e.js","test":"yarn lint && jest src/.*","test:watch":"jest --watch src","prepublishOnly":"yarn build && yarn test","prepare":"yarn build"},"repository":{"type":"git","url":"https://github.com/knsv/mermaid"},"author":"Knut 
Sveidqvist","license":"MIT","standard":{"ignore":["**/parser/*.js","dist/**/*.js","cypress/**/*.js"],"globals":["page"]},"dependencies":{"@braintree/sanitize-url":"^3.1.0","babel-eslint":"^10.1.0","d3":"^5.7.0","dagre":"^0.8.4","dagre-d3":"^0.6.4","entity-decode":"^2.0.2","graphlib":"^2.1.7","he":"^1.2.0","khroma":"^1.1.0","minify":"^4.1.1","moment-mini":"^2.22.1","stylis":"^3.5.2"},"devDependencies":{"@babel/core":"^7.2.2","@babel/preset-env":"^7.8.4","@babel/register":"^7.0.0","@percy/cypress":"*","babel-core":"7.0.0-bridge.0","babel-jest":"^24.9.0","babel-loader":"^8.0.4","coveralls":"^3.0.2","css-loader":"^2.0.1","css-to-string-loader":"^0.1.3","cypress":"4.0.1","documentation":"^12.0.1","eslint":"^6.3.0","eslint-config-prettier":"^6.3.0","eslint-plugin-prettier":"^3.1.0","husky":"^1.2.1","identity-obj-proxy":"^3.0.0","jest":"^24.9.0","jison":"^0.4.18","moment":"^2.23.0","node-sass":"^4.12.0","prettier":"^1.18.2","puppeteer":"^1.17.0","sass-loader":"^7.1.0","start-server-and-test":"^1.10.6","terser-webpack-plugin":"^2.2.2","webpack":"^4.41.2","webpack-bundle-analyzer":"^3.7.0","webpack-cli":"^3.1.2","webpack-dev-server":"^3.4.1","webpack-node-externals":"^1.7.2","yarn-upgrade-all":"^0.5.0"},"files":["dist"],"yarn-upgrade-all":{"ignore":["babel-core"]},"sideEffects":["**/*.css","**/*.scss"],"husky":{"hooks":{"pre-push":"yarn test"}}}')},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=new(n(176).default)({r:0,g:0,b:0,a:0},"transparent");e.default=r},function(t,e,n){var r=n(58),i=n(59);t.exports=function(t,e,n,a){var o=!n;n||(n={});for(var s=-1,c=e.length;++s-1&&t%1==0&&t-1}(s)?s:(n=s.match(a))?(e=n[0],r.test(e)?"about:blank":s):"about:blank"}}},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[2,3],n=[1,7],r=[7,12,15,17,19,20,21],i=[7,11,12,15,17,19,20,21],a=[2,20],o=[1,32],s={trace:function(){},yy:{},symbols_:{error:2,start:3,GG:4,":":5,document:6,EOF:7,DIR:8,options:9,body:10,OPT:11,NL:12,line:13,statement:14,COMMIT:15,commit_arg:16,BRANCH:17,ID:18,CHECKOUT:19,MERGE:20,RESET:21,reset_arg:22,STR:23,HEAD:24,reset_parents:25,CARET:26,$accept:0,$end:1},terminals_:{2:"error",4:"GG",5:":",7:"EOF",8:"DIR",11:"OPT",12:"NL",15:"COMMIT",17:"BRANCH",18:"ID",19:"CHECKOUT",20:"MERGE",21:"RESET",23:"STR",24:"HEAD",26:"CARET"},productions_:[0,[3,4],[3,5],[6,0],[6,2],[9,2],[9,1],[10,0],[10,2],[13,2],[13,1],[14,2],[14,2],[14,2],[14,2],[14,2],[16,0],[16,1],[22,2],[22,2],[25,0],[25,2]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:return a[s-1];case 2:return r.setDirection(a[s-3]),a[s-1];case 4:r.setOptions(a[s-1]),this.$=a[s];break;case 5:a[s-1]+=a[s],this.$=a[s-1];break;case 7:this.$=[];break;case 8:a[s-1].push(a[s]),this.$=a[s-1];break;case 9:this.$=a[s-1];break;case 11:r.commit(a[s]);break;case 12:r.branch(a[s]);break;case 13:r.checkout(a[s]);break;case 14:r.merge(a[s]);break;case 15:r.reset(a[s]);break;case 16:this.$="";break;case 17:this.$=a[s];break;case 18:this.$=a[s-1]+":"+a[s];break;case 19:this.$=a[s-1]+":"+r.count,r.count=0;break;case 20:r.count=0;break;case 
21:r.count+=1}},table:[{3:1,4:[1,2]},{1:[3]},{5:[1,3],8:[1,4]},{6:5,7:e,9:6,12:n},{5:[1,8]},{7:[1,9]},t(r,[2,7],{10:10,11:[1,11]}),t(i,[2,6]),{6:12,7:e,9:6,12:n},{1:[2,1]},{7:[2,4],12:[1,15],13:13,14:14,15:[1,16],17:[1,17],19:[1,18],20:[1,19],21:[1,20]},t(i,[2,5]),{7:[1,21]},t(r,[2,8]),{12:[1,22]},t(r,[2,10]),{12:[2,16],16:23,23:[1,24]},{18:[1,25]},{18:[1,26]},{18:[1,27]},{18:[1,30],22:28,24:[1,29]},{1:[2,2]},t(r,[2,9]),{12:[2,11]},{12:[2,17]},{12:[2,12]},{12:[2,13]},{12:[2,14]},{12:[2,15]},{12:a,25:31,26:o},{12:a,25:33,26:o},{12:[2,18]},{12:a,25:34,26:o},{12:[2,19]},{12:[2,21]}],defaultActions:{9:[2,1],21:[2,2],23:[2,11],24:[2,17],25:[2,12],26:[2,13],27:[2,14],28:[2,15],31:[2,18],33:[2,19],34:[2,21]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},c={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 12;case 1:case 2:case 3:break;case 4:return 4;case 5:return 15;case 6:return 17;case 7:return 20;case 8:return 21;case 9:return 19;case 10:case 11:return 8;case 12:return 5;case 13:return 26;case 14:this.begin("options");break;case 15:this.popState();break;case 16:return 11;case 17:this.begin("string");break;case 18:this.popState();break;case 19:return 23;case 20:return 18;case 21:return 7}},rules:[/^(?:(\r?\n)+)/i,/^(?:\s+)/i,/^(?:#[^\n]*)/i,/^(?:%[^\n]*)/i,/^(?:gitGraph\b)/i,/^(?:commit\b)/i,/^(?:branch\b)/i,/^(?:merge\b)/i,/^(?:reset\b)/i,/^(?:checkout\b)/i,/^(?:LR\b)/i,/^(?:BT\b)/i,/^(?::)/i,/^(?:\^)/i,/^(?:options\r?\n)/i,/^(?:end\r?\n)/i,/^(?:[^\n]+\r?\n)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:[a-zA-Z][-_\.a-zA-Z0-9]*[-_a-zA-Z0-9])/i,/^(?:$)/i],conditions:{options:{rules:[15,16],inclusive:!1},string:{rules:[18,19],inclusive:!1},INITIAL:{rules:[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,17,20,21],inclusive:!0}}};function u(){this.yy={}}return s.lexer=c,u.prototype=s,s.Parser=u,new u}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var 
i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return n},e=[6,9,10],n={trace:function(){},yy:{},symbols_:{error:2,start:3,info:4,document:5,EOF:6,line:7,statement:8,NL:9,showInfo:10,$accept:0,$end:1},terminals_:{2:"error",4:"info",6:"EOF",9:"NL",10:"showInfo"},productions_:[0,[3,3],[5,0],[5,2],[7,1],[7,1],[8,1]],performAction:function(t,e,n,r,i,a,o){a.length;switch(i){case 1:return r;case 4:break;case 6:r.setInfo(!0)}},table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:6,9:[1,7],10:[1,8]},{1:[2,1]},t(e,[2,3]),t(e,[2,4]),t(e,[2,5]),t(e,[2,6])],defaultActions:{4:[2,1]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of 
input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},r={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var 
r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return 4;case 1:return 9;case 2:return"space";case 3:return 10;case 4:return 6;case 5:return"TXT"}},rules:[/^(?:info\b)/i,/^(?:[\s\n\r]+)/i,/^(?:[\s]+)/i,/^(?:showInfo\b)/i,/^(?:$)/i,/^(?:.)/i],conditions:{INITIAL:{rules:[0,1,2,3,4,5],inclusive:!0}}};function i(){this.yy={}}return n.lexer=r,i.prototype=n,n.Parser=i,new i}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,4],n=[1,5],r=[1,6],i=[1,7],a=[1,9],o=[1,10,12,19,20,21,22],s=[1,6,10,12,19,20,21,22],c=[19,20,21],u=[1,22],l=[6,19,20,21,22],h={trace:function(){},yy:{},symbols_:{error:2,start:3,eol:4,directive:5,PIE:6,document:7,line:8,statement:9,txt:10,value:11,title:12,title_value:13,openDirective:14,typeDirective:15,closeDirective:16,":":17,argDirective:18,NEWLINE:19,";":20,EOF:21,open_directive:22,type_directive:23,arg_directive:24,close_directive:25,$accept:0,$end:1},terminals_:{2:"error",6:"PIE",10:"txt",11:"value",12:"title",13:"title_value",17:":",19:"NEWLINE",20:";",21:"EOF",22:"open_directive",23:"type_directive",24:"arg_directive",25:"close_directive"},productions_:[0,[3,2],[3,2],[3,2],[7,0],[7,2],[8,2],[9,0],[9,2],[9,2],[9,1],[5,3],[5,5],[4,1],[4,1],[4,1],[14,1],[15,1],[18,1],[16,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 6:this.$=a[s-1];break;case 8:r.addSection(a[s-1],r.cleanupValue(a[s]));break;case 9:this.$=a[s].trim(),r.setTitle(this.$);break;case 16:r.parseDirective("%%{","open_directive");break;case 17:r.parseDirective(a[s],"type_directive");break;case 18:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 19:r.parseDirective("}%%","close_directive","pie")}},table:[{3:1,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{1:[3]},{3:10,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},{3:11,4:2,5:3,6:e,14:8,19:n,20:r,21:i,22:a},t(o,[2,4],{7:12}),t(s,[2,13]),t(s,[2,14]),t(s,[2,15]),{15:13,23:[1,14]},{23:[2,16]},{1:[2,1]},{1:[2,2]},t(c,[2,7],{14:8,8:15,9:16,5:19,1:[2,3],10:[1,17],12:[1,18],22:a}),{16:20,17:[1,21],25:u},t([17,25],[2,17]),t(o,[2,5]),{4:23,19:n,20:r,21:i},{11:[1,24]},{13:[1,25]},t(c,[2,10]),t(l,[2,11]),{18:26,24:[1,27]},t(l,[2,19]),t(o,[2,6]),t(c,[2,8]),t(c,[2,9]),{16:28,25:u},{25:[2,18]},t(l,[2,12])],defaultActions:{9:[2,16],10:[2,1],11:[2,2],27:[2,18]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var 
e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line "+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return 
E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},f={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". 
You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var 
t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),22;case 1:return this.begin("type_directive"),23;case 2:return this.popState(),this.begin("arg_directive"),17;case 3:return this.popState(),this.popState(),25;case 4:return 24;case 5:case 6:break;case 7:return 19;case 8:case 9:break;case 10:return this.begin("title"),12;case 11:return this.popState(),"title_value";case 12:this.begin("string");break;case 13:this.popState();break;case 14:return"txt";case 15:return 6;case 16:return"value";case 17:return 
21}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n\r]+)/i,/^(?:%%[^\n]*)/i,/^(?:[\s]+)/i,/^(?:title\b)/i,/^(?:(?!\n||)*[^\n]*)/i,/^(?:["])/i,/^(?:["])/i,/^(?:[^"]*)/i,/^(?:pie\b)/i,/^(?::[\s]*[\d]+(?:\.[\d]+)?)/i,/^(?:$)/i],conditions:{close_directive:{rules:[],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},open_directive:{rules:[1],inclusive:!1},title:{rules:[11],inclusive:!1},string:{rules:[13,14],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,12,15,16,17],inclusive:!0}}};function d(){this.yy={}}return h.lexer=f,d.prototype=h,h.Parser=d,new d}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){(function(t,r){var i=function(){var t=function(t,e,n,r){for(n=n||{},r=t.length;r--;n[t[r]]=e);return 
n},e=[1,2],n=[1,5],r=[6,9,11,20,30],i=[1,17],a=[1,20],o=[1,24],s=[1,25],c=[1,26],u=[1,27],l=[20,27,28],h=[4,6,9,11,20,30],f=[23,24,25,26],d={trace:function(){},yy:{},symbols_:{error:2,start:3,ER_DIAGRAM:4,document:5,EOF:6,directive:7,line:8,SPACE:9,statement:10,NEWLINE:11,openDirective:12,typeDirective:13,closeDirective:14,":":15,argDirective:16,entityName:17,relSpec:18,role:19,ALPHANUM:20,cardinality:21,relType:22,ZERO_OR_ONE:23,ZERO_OR_MORE:24,ONE_OR_MORE:25,ONLY_ONE:26,NON_IDENTIFYING:27,IDENTIFYING:28,WORD:29,open_directive:30,type_directive:31,arg_directive:32,close_directive:33,$accept:0,$end:1},terminals_:{2:"error",4:"ER_DIAGRAM",6:"EOF",9:"SPACE",11:"NEWLINE",15:":",20:"ALPHANUM",23:"ZERO_OR_ONE",24:"ZERO_OR_MORE",25:"ONE_OR_MORE",26:"ONLY_ONE",27:"NON_IDENTIFYING",28:"IDENTIFYING",29:"WORD",30:"open_directive",31:"type_directive",32:"arg_directive",33:"close_directive"},productions_:[0,[3,3],[3,2],[5,0],[5,2],[8,2],[8,1],[8,1],[8,1],[7,4],[7,6],[10,1],[10,5],[10,1],[17,1],[18,3],[21,1],[21,1],[21,1],[21,1],[22,1],[22,1],[19,1],[19,1],[12,1],[13,1],[16,1],[14,1]],performAction:function(t,e,n,r,i,a,o){var s=a.length-1;switch(i){case 1:break;case 3:this.$=[];break;case 4:a[s-1].push(a[s]),this.$=a[s-1];break;case 5:case 6:this.$=a[s];break;case 7:case 8:this.$=[];break;case 12:r.addEntity(a[s-4]),r.addEntity(a[s-2]),r.addRelationship(a[s-4],a[s],a[s-2],a[s-3]);break;case 13:r.addEntity(a[s]);break;case 14:this.$=a[s];break;case 15:this.$={cardA:a[s],relType:a[s-1],cardB:a[s-2]};break;case 16:this.$=r.Cardinality.ZERO_OR_ONE;break;case 17:this.$=r.Cardinality.ZERO_OR_MORE;break;case 18:this.$=r.Cardinality.ONE_OR_MORE;break;case 19:this.$=r.Cardinality.ONLY_ONE;break;case 20:this.$=r.Identification.NON_IDENTIFYING;break;case 21:this.$=r.Identification.IDENTIFYING;break;case 22:this.$=a[s].replace(/"/g,"");break;case 23:this.$=a[s];break;case 24:r.parseDirective("%%{","open_directive");break;case 25:r.parseDirective(a[s],"type_directive");break;case 
26:a[s]=a[s].trim().replace(/'/g,'"'),r.parseDirective(a[s],"arg_directive");break;case 27:r.parseDirective("}%%","close_directive","er")}},table:[{3:1,4:e,7:3,12:4,30:n},{1:[3]},t(r,[2,3],{5:6}),{3:7,4:e,7:3,12:4,30:n},{13:8,31:[1,9]},{31:[2,24]},{6:[1,10],7:15,8:11,9:[1,12],10:13,11:[1,14],12:4,17:16,20:i,30:n},{1:[2,2]},{14:18,15:[1,19],33:a},t([15,33],[2,25]),t(r,[2,8],{1:[2,1]}),t(r,[2,4]),{7:15,10:21,12:4,17:16,20:i,30:n},t(r,[2,6]),t(r,[2,7]),t(r,[2,11]),t(r,[2,13],{18:22,21:23,23:o,24:s,25:c,26:u}),t([6,9,11,15,20,23,24,25,26,30],[2,14]),{11:[1,28]},{16:29,32:[1,30]},{11:[2,27]},t(r,[2,5]),{17:31,20:i},{22:32,27:[1,33],28:[1,34]},t(l,[2,16]),t(l,[2,17]),t(l,[2,18]),t(l,[2,19]),t(h,[2,9]),{14:35,33:a},{33:[2,26]},{15:[1,36]},{21:37,23:o,24:s,25:c,26:u},t(f,[2,20]),t(f,[2,21]),{11:[1,38]},{19:39,20:[1,41],29:[1,40]},{20:[2,15]},t(h,[2,10]),t(r,[2,12]),t(r,[2,22]),t(r,[2,23])],defaultActions:{5:[2,24],7:[2,2],20:[2,27],30:[2,26],37:[2,15]},parseError:function(t,e){if(!e.recoverable){var n=new Error(t);throw n.hash=e,n}this.trace(t)},parse:function(t){var e=this,n=[0],r=[],i=[null],a=[],o=this.table,s="",c=0,u=0,l=0,h=2,f=1,d=a.slice.call(arguments,1),p=Object.create(this.lexer),g={yy:{}};for(var y in this.yy)Object.prototype.hasOwnProperty.call(this.yy,y)&&(g.yy[y]=this.yy[y]);p.setInput(t,g.yy),g.yy.lexer=p,g.yy.parser=this,void 0===p.yylloc&&(p.yylloc={});var v=p.yylloc;a.push(v);var m=p.options&&p.options.ranges;function b(){var t;return"number"!=typeof(t=r.pop()||p.lex()||f)&&(t instanceof Array&&(t=(r=t).pop()),t=e.symbols_[t]||t),t}"function"==typeof g.yy.parseError?this.parseError=g.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;for(var x,_,k,w,E,T,C,S,A,M={};;){if(k=n[n.length-1],this.defaultActions[k]?w=this.defaultActions[k]:(null==x&&(x=b()),w=o[k]&&o[k][x]),void 0===w||!w.length||!w[0]){var O="";for(T in A=[],o[k])this.terminals_[T]&&T>h&&A.push("'"+this.terminals_[T]+"'");O=p.showPosition?"Parse error on line 
"+(c+1)+":\n"+p.showPosition()+"\nExpecting "+A.join(", ")+", got '"+(this.terminals_[x]||x)+"'":"Parse error on line "+(c+1)+": Unexpected "+(x==f?"end of input":"'"+(this.terminals_[x]||x)+"'"),this.parseError(O,{text:p.match,token:this.terminals_[x]||x,line:p.yylineno,loc:v,expected:A})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+k+", token: "+x);switch(w[0]){case 1:n.push(x),i.push(p.yytext),a.push(p.yylloc),n.push(w[1]),x=null,_?(x=_,_=null):(u=p.yyleng,s=p.yytext,c=p.yylineno,v=p.yylloc,l>0&&l--);break;case 2:if(C=this.productions_[w[1]][1],M.$=i[i.length-C],M._$={first_line:a[a.length-(C||1)].first_line,last_line:a[a.length-1].last_line,first_column:a[a.length-(C||1)].first_column,last_column:a[a.length-1].last_column},m&&(M._$.range=[a[a.length-(C||1)].range[0],a[a.length-1].range[1]]),void 0!==(E=this.performAction.apply(M,[s,u,c,g.yy,w[1],i,a].concat(d))))return E;C&&(n=n.slice(0,-1*C*2),i=i.slice(0,-1*C),a=a.slice(0,-1*C)),n.push(this.productions_[w[1]][0]),i.push(M.$),a.push(M._$),S=o[n[n.length-2]][n[n.length-1]],n.push(S);break;case 3:return!0}}return!0}},p={EOF:1,parseError:function(t,e){if(!this.yy.parser)throw new Error(t);this.yy.parser.parseError(t,e)},setInput:function(t,e){return this.yy=e||this.yy||{},this._input=t,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},input:function(){var t=this._input[0];return this.yytext+=t,this.yyleng++,this.offset++,this.match+=t,this.matched+=t,t.match(/(?:\r\n?|\n).*/g)?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),t},unput:function(t){var 
e=t.length,n=t.split(/(?:\r\n?|\n)/g);this._input=t+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-e),this.offset-=e;var r=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),n.length-1&&(this.yylineno-=n.length-1);var i=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:n?(n.length===r.length?this.yylloc.first_column:0)+r[r.length-n.length].length-n[0].length:this.yylloc.first_column-e},this.options.ranges&&(this.yylloc.range=[i[0],i[0]+this.yyleng-e]),this.yyleng=this.yytext.length,this},more:function(){return this._more=!0,this},reject:function(){return this.options.backtrack_lexer?(this._backtrack=!0,this):this.parseError("Lexical error on line "+(this.yylineno+1)+". You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},less:function(t){this.unput(this.match.slice(t))},pastInput:function(){var t=this.matched.substr(0,this.matched.length-this.match.length);return(t.length>20?"...":"")+t.substr(-20).replace(/\n/g,"")},upcomingInput:function(){var t=this.match;return t.length<20&&(t+=this._input.substr(0,20-t.length)),(t.substr(0,20)+(t.length>20?"...":"")).replace(/\n/g,"")},showPosition:function(){var t=this.pastInput(),e=new Array(t.length+1).join("-");return t+this.upcomingInput()+"\n"+e+"^"},test_match:function(t,e){var 
n,r,i;if(this.options.backtrack_lexer&&(i={yylineno:this.yylineno,yylloc:{first_line:this.yylloc.first_line,last_line:this.last_line,first_column:this.yylloc.first_column,last_column:this.yylloc.last_column},yytext:this.yytext,match:this.match,matches:this.matches,matched:this.matched,yyleng:this.yyleng,offset:this.offset,_more:this._more,_input:this._input,yy:this.yy,conditionStack:this.conditionStack.slice(0),done:this.done},this.options.ranges&&(i.yylloc.range=this.yylloc.range.slice(0))),(r=t[0].match(/(?:\r\n?|\n).*/g))&&(this.yylineno+=r.length),this.yylloc={first_line:this.yylloc.last_line,last_line:this.yylineno+1,first_column:this.yylloc.last_column,last_column:r?r[r.length-1].length-r[r.length-1].match(/\r?\n?/)[0].length:this.yylloc.last_column+t[0].length},this.yytext+=t[0],this.match+=t[0],this.matches=t,this.yyleng=this.yytext.length,this.options.ranges&&(this.yylloc.range=[this.offset,this.offset+=this.yyleng]),this._more=!1,this._backtrack=!1,this._input=this._input.slice(t[0].length),this.matched+=t[0],n=this.performAction.call(this,this.yy,this,e,this.conditionStack[this.conditionStack.length-1]),this.done&&this._input&&(this.done=!1),n)return n;if(this._backtrack){for(var a in i)this[a]=i[a];return!1}return!1},next:function(){if(this.done)return this.EOF;var t,e,n,r;this._input||(this.done=!0),this._more||(this.yytext="",this.match="");for(var i=this._currentRules(),a=0;ae[0].length)){if(e=n,r=a,this.options.backtrack_lexer){if(!1!==(t=this.test_match(n,i[a])))return t;if(this._backtrack){e=!1;continue}return!1}if(!this.options.flex)break}return e?!1!==(t=this.test_match(e,i[r]))&&t:""===this._input?this.EOF:this.parseError("Lexical error on line "+(this.yylineno+1)+". 
Unrecognized text.\n"+this.showPosition(),{text:"",token:null,line:this.yylineno})},lex:function(){var t=this.next();return t||this.lex()},begin:function(t){this.conditionStack.push(t)},popState:function(){return this.conditionStack.length-1>0?this.conditionStack.pop():this.conditionStack[0]},_currentRules:function(){return this.conditionStack.length&&this.conditionStack[this.conditionStack.length-1]?this.conditions[this.conditionStack[this.conditionStack.length-1]].rules:this.conditions.INITIAL.rules},topState:function(t){return(t=this.conditionStack.length-1-Math.abs(t||0))>=0?this.conditionStack[t]:"INITIAL"},pushState:function(t){this.begin(t)},stateStackSize:function(){return this.conditionStack.length},options:{"case-insensitive":!0},performAction:function(t,e,n,r){switch(n){case 0:return this.begin("open_directive"),30;case 1:return this.begin("type_directive"),31;case 2:return this.popState(),this.begin("arg_directive"),15;case 3:return this.popState(),this.popState(),33;case 4:return 32;case 5:case 6:break;case 7:return 11;case 8:break;case 9:return 9;case 10:return 29;case 11:return 4;case 12:return 23;case 13:return 24;case 14:return 25;case 15:return 26;case 16:return 23;case 17:return 24;case 18:return 25;case 19:return 27;case 20:return 28;case 21:case 22:return 27;case 23:return 20;case 24:return e.yytext[0];case 25:return 
6}},rules:[/^(?:%%\{)/i,/^(?:((?:(?!\}%%)[^:.])*))/i,/^(?::)/i,/^(?:\}%%)/i,/^(?:((?:(?!\}%%).|\n)*))/i,/^(?:%(?!\{)[^\n]*)/i,/^(?:[^\}]%%[^\n]*)/i,/^(?:[\n]+)/i,/^(?:\s+)/i,/^(?:[\s]+)/i,/^(?:"[^"]*")/i,/^(?:erDiagram\b)/i,/^(?:\|o\b)/i,/^(?:\}o\b)/i,/^(?:\}\|)/i,/^(?:\|\|)/i,/^(?:o\|)/i,/^(?:o\{)/i,/^(?:\|\{)/i,/^(?:\.\.)/i,/^(?:--)/i,/^(?:\.-)/i,/^(?:-\.)/i,/^(?:[A-Za-z][A-Za-z0-9\-_]*)/i,/^(?:.)/i,/^(?:$)/i],conditions:{open_directive:{rules:[1],inclusive:!1},type_directive:{rules:[2,3],inclusive:!1},arg_directive:{rules:[3,4],inclusive:!1},INITIAL:{rules:[0,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25],inclusive:!0}}};function g(){this.yy={}}return d.lexer=p,g.prototype=d,d.Parser=g,new g}();e.parser=i,e.Parser=i.Parser,e.parse=function(){return i.parse.apply(i,arguments)},e.main=function(r){r[1]||(console.log("Usage: "+r[0]+" FILE"),t.exit(1));var i=n(19).readFileSync(n(20).normalize(r[1]),"utf8");return e.parser.parse(i)},n.c[n.s]===r&&e.main(t.argv.slice(1))}).call(this,n(14),n(7)(t))},function(t,e,n){"use strict";var r;Object.defineProperty(e,"__esModule",{value:!0}),function(t){t[t.ALL=0]="ALL",t[t.RGB=1]="RGB",t[t.HSL=2]="HSL"}(r||(r={})),e.TYPE=r},function(t,e,n){"use strict";var r=n(10);t.exports=i;function i(t){this._isDirected=!r.has(t,"directed")||t.directed,this._isMultigraph=!!r.has(t,"multigraph")&&t.multigraph,this._isCompound=!!r.has(t,"compound")&&t.compound,this._label=void 0,this._defaultNodeLabelFn=r.constant(void 0),this._defaultEdgeLabelFn=r.constant(void 0),this._nodes={},this._isCompound&&(this._parent={},this._children={},this._children["\0"]={}),this._in={},this._preds={},this._out={},this._sucs={},this._edgeObjs={},this._edgeLabels={}}function a(t,e){t[e]?t[e]++:t[e]=1}function o(t,e){--t[e]||delete t[e]}function s(t,e,n,i){var a=""+e,o=""+n;if(!t&&a>o){var s=a;a=o,o=s}return a+""+o+""+(r.isUndefined(i)?"\0":i)}function c(t,e,n,r){var i=""+e,a=""+n;if(!t&&i>a){var o=i;i=a,a=o}var s={v:i,w:a};return 
r&&(s.name=r),s}function u(t,e){return s(t,e.v,e.w,e.name)}i.prototype._nodeCount=0,i.prototype._edgeCount=0,i.prototype.isDirected=function(){return this._isDirected},i.prototype.isMultigraph=function(){return this._isMultigraph},i.prototype.isCompound=function(){return this._isCompound},i.prototype.setGraph=function(t){return this._label=t,this},i.prototype.graph=function(){return this._label},i.prototype.setDefaultNodeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultNodeLabelFn=t,this},i.prototype.nodeCount=function(){return this._nodeCount},i.prototype.nodes=function(){return r.keys(this._nodes)},i.prototype.sources=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._in[e])}))},i.prototype.sinks=function(){var t=this;return r.filter(this.nodes(),(function(e){return r.isEmpty(t._out[e])}))},i.prototype.setNodes=function(t,e){var n=arguments,i=this;return r.each(t,(function(t){n.length>1?i.setNode(t,e):i.setNode(t)})),this},i.prototype.setNode=function(t,e){return r.has(this._nodes,t)?(arguments.length>1&&(this._nodes[t]=e),this):(this._nodes[t]=arguments.length>1?e:this._defaultNodeLabelFn(t),this._isCompound&&(this._parent[t]="\0",this._children[t]={},this._children["\0"][t]=!0),this._in[t]={},this._preds[t]={},this._out[t]={},this._sucs[t]={},++this._nodeCount,this)},i.prototype.node=function(t){return this._nodes[t]},i.prototype.hasNode=function(t){return r.has(this._nodes,t)},i.prototype.removeNode=function(t){var e=this;if(r.has(this._nodes,t)){var n=function(t){e.removeEdge(e._edgeObjs[t])};delete this._nodes[t],this._isCompound&&(this._removeFromParentsChildList(t),delete this._parent[t],r.each(this.children(t),(function(t){e.setParent(t)})),delete this._children[t]),r.each(r.keys(this._in[t]),n),delete this._in[t],delete this._preds[t],r.each(r.keys(this._out[t]),n),delete this._out[t],delete this._sucs[t],--this._nodeCount}return this},i.prototype.setParent=function(t,e){if(!this._isCompound)throw 
new Error("Cannot set parent in a non-compound graph");if(r.isUndefined(e))e="\0";else{for(var n=e+="";!r.isUndefined(n);n=this.parent(n))if(n===t)throw new Error("Setting "+e+" as parent of "+t+" would create a cycle");this.setNode(e)}return this.setNode(t),this._removeFromParentsChildList(t),this._parent[t]=e,this._children[e][t]=!0,this},i.prototype._removeFromParentsChildList=function(t){delete this._children[this._parent[t]][t]},i.prototype.parent=function(t){if(this._isCompound){var e=this._parent[t];if("\0"!==e)return e}},i.prototype.children=function(t){if(r.isUndefined(t)&&(t="\0"),this._isCompound){var e=this._children[t];if(e)return r.keys(e)}else{if("\0"===t)return this.nodes();if(this.hasNode(t))return[]}},i.prototype.predecessors=function(t){var e=this._preds[t];if(e)return r.keys(e)},i.prototype.successors=function(t){var e=this._sucs[t];if(e)return r.keys(e)},i.prototype.neighbors=function(t){var e=this.predecessors(t);if(e)return r.union(e,this.successors(t))},i.prototype.isLeaf=function(t){return 0===(this.isDirected()?this.successors(t):this.neighbors(t)).length},i.prototype.filterNodes=function(t){var e=new this.constructor({directed:this._isDirected,multigraph:this._isMultigraph,compound:this._isCompound});e.setGraph(this.graph());var n=this;r.each(this._nodes,(function(n,r){t(r)&&e.setNode(r,n)})),r.each(this._edgeObjs,(function(t){e.hasNode(t.v)&&e.hasNode(t.w)&&e.setEdge(t,n.edge(t))}));var i={};return this._isCompound&&r.each(e.nodes(),(function(t){e.setParent(t,function t(r){var a=n.parent(r);return void 0===a||e.hasNode(a)?(i[r]=a,a):a in i?i[a]:t(a)}(t))})),e},i.prototype.setDefaultEdgeLabel=function(t){return r.isFunction(t)||(t=r.constant(t)),this._defaultEdgeLabelFn=t,this},i.prototype.edgeCount=function(){return this._edgeCount},i.prototype.edges=function(){return r.values(this._edgeObjs)},i.prototype.setPath=function(t,e){var n=this,i=arguments;return r.reduce(t,(function(t,r){return 
i.length>1?n.setEdge(t,r,e):n.setEdge(t,r),r})),this},i.prototype.setEdge=function(){var t,e,n,i,o=!1,u=arguments[0];"object"==typeof u&&null!==u&&"v"in u?(t=u.v,e=u.w,n=u.name,2===arguments.length&&(i=arguments[1],o=!0)):(t=u,e=arguments[1],n=arguments[3],arguments.length>2&&(i=arguments[2],o=!0)),t=""+t,e=""+e,r.isUndefined(n)||(n=""+n);var l=s(this._isDirected,t,e,n);if(r.has(this._edgeLabels,l))return o&&(this._edgeLabels[l]=i),this;if(!r.isUndefined(n)&&!this._isMultigraph)throw new Error("Cannot set a named edge when isMultigraph = false");this.setNode(t),this.setNode(e),this._edgeLabels[l]=o?i:this._defaultEdgeLabelFn(t,e,n);var h=c(this._isDirected,t,e,n);return t=h.v,e=h.w,Object.freeze(h),this._edgeObjs[l]=h,a(this._preds[e],t),a(this._sucs[t],e),this._in[e][l]=h,this._out[t][l]=h,this._edgeCount++,this},i.prototype.edge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return this._edgeLabels[r]},i.prototype.hasEdge=function(t,e,n){var i=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n);return r.has(this._edgeLabels,i)},i.prototype.removeEdge=function(t,e,n){var r=1===arguments.length?u(this._isDirected,arguments[0]):s(this._isDirected,t,e,n),i=this._edgeObjs[r];return i&&(t=i.v,e=i.w,delete this._edgeLabels[r],delete this._edgeObjs[r],o(this._preds[e],t),o(this._sucs[t],e),delete this._in[e][r],delete this._out[t][r],this._edgeCount--),this},i.prototype.inEdges=function(t,e){var n=this._in[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.v===e})):i}},i.prototype.outEdges=function(t,e){var n=this._out[t];if(n){var i=r.values(n);return e?r.filter(i,(function(t){return t.w===e})):i}},i.prototype.nodeEdges=function(t,e){var n=this.inEdges(t,e);if(n)return n.concat(this.outEdges(t,e))}},function(t,e,n){var r=n(33)(n(16),"Map");t.exports=r},function(t,e,n){var r=n(217),i=n(224),a=n(226),o=n(227),s=n(228);function c(t){var 
e=-1,n=null==t?0:t.length;for(this.clear();++e-1&&t%1==0&&t<=9007199254740991}},function(t,e,n){(function(t){var r=n(109),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i&&r.process,s=function(){try{var t=a&&a.require&&a.require("util").types;return t||o&&o.binding&&o.binding("util")}catch(t){}}();t.exports=s}).call(this,n(7)(t))},function(t,e,n){var r=n(62),i=n(234),a=Object.prototype.hasOwnProperty;t.exports=function(t){if(!r(t))return i(t);var e=[];for(var n in Object(t))a.call(t,n)&&"constructor"!=n&&e.push(n);return e}},function(t,e,n){var r=n(116),i=n(117),a=Object.prototype.propertyIsEnumerable,o=Object.getOwnPropertySymbols,s=o?function(t){return null==t?[]:(t=Object(t),r(o(t),(function(e){return a.call(t,e)})))}:i;t.exports=s},function(t,e){t.exports=function(t,e){for(var n=-1,r=e.length,i=t.length;++n0&&a(l)?n>1?t(l,n-1,a,o,s):r(s,l):o||(s[s.length]=l)}return s}},function(t,e,n){var r=n(42);t.exports=function(t,e,n){for(var i=-1,a=t.length;++i4,u=c?1:17,l=c?8:4,h=s?0:-1,f=c?255:15;return i.default.set({r:(r>>l*(h+3)&f)*u,g:(r>>l*(h+2)&f)*u,b:(r>>l*(h+1)&f)*u,a:s?(r&f)*u/255:1},t)}}},stringify:function(t){return t.a<1?"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]+r.default.unit.frac2hex(t.a):"#"+a.DEC2HEX[Math.round(t.r)]+a.DEC2HEX[Math.round(t.g)]+a.DEC2HEX[Math.round(t.b)]}};e.default=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a=n(15);e.default=function(t,e,n,o){void 0===o&&(o=1);var s=i.default.set({h:r.default.channel.clamp.h(t),s:r.default.channel.clamp.s(e),l:r.default.channel.clamp.l(n),a:r.default.channel.clamp.a(o)});return a.default.stringify(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"a")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15);e.default=function(t){var 
e=i.default.parse(t),n=e.r,a=e.g,o=e.b,s=.2126*r.default.channel.toLinear(n)+.7152*r.default.channel.toLinear(a)+.0722*r.default.channel.toLinear(o);return r.default.lang.round(s)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(102);e.default=function(t){return r.default(t)>=.5}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"a",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(52);e.default=function(t,e){var n=r.default.parse(t),a={};for(var o in e)e[o]&&(a[o]=n[o]+e[o]);return i.default(t,a)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(51);e.default=function(t,e,n){void 0===n&&(n=50);var a=r.default.parse(t),o=a.r,s=a.g,c=a.b,u=a.a,l=r.default.parse(e),h=l.r,f=l.g,d=l.b,p=l.a,g=n/100,y=2*g-1,v=u-p,m=((y*v==-1?y:(y+v)/(1+y*v))+1)/2,b=1-m,x=o*m+h*b,_=s*m+f*b,k=c*m+d*b,w=u*g+p*(1-g);return i.default(x,_,k,w)}},function(t,e,n){var r=n(53),i=n(79),a=n(58),o=n(229),s=n(235),c=n(114),u=n(115),l=n(238),h=n(239),f=n(119),d=n(240),p=n(41),g=n(244),y=n(245),v=n(124),m=n(5),b=n(39),x=n(249),_=n(11),k=n(251),w=n(30),E={};E["[object Arguments]"]=E["[object Array]"]=E["[object ArrayBuffer]"]=E["[object DataView]"]=E["[object Boolean]"]=E["[object Date]"]=E["[object Float32Array]"]=E["[object Float64Array]"]=E["[object Int8Array]"]=E["[object Int16Array]"]=E["[object Int32Array]"]=E["[object Map]"]=E["[object Number]"]=E["[object Object]"]=E["[object RegExp]"]=E["[object Set]"]=E["[object String]"]=E["[object Symbol]"]=E["[object Uint8Array]"]=E["[object Uint8ClampedArray]"]=E["[object Uint16Array]"]=E["[object Uint32Array]"]=!0,E["[object Error]"]=E["[object Function]"]=E["[object WeakMap]"]=!1,t.exports=function 
t(e,n,T,C,S,A){var M,O=1&n,D=2&n,N=4&n;if(T&&(M=S?T(e,C,S,A):T(e)),void 0!==M)return M;if(!_(e))return e;var B=m(e);if(B){if(M=g(e),!O)return u(e,M)}else{var L=p(e),F="[object Function]"==L||"[object GeneratorFunction]"==L;if(b(e))return c(e,O);if("[object Object]"==L||"[object Arguments]"==L||F&&!S){if(M=D||F?{}:v(e),!O)return D?h(e,s(M,e)):l(e,o(M,e))}else{if(!E[L])return S?e:{};M=y(e,L,O)}}A||(A=new r);var P=A.get(e);if(P)return P;A.set(e,M),k(e)?e.forEach((function(r){M.add(t(r,n,T,r,e,A))})):x(e)&&e.forEach((function(r,i){M.set(i,t(r,n,T,i,e,A))}));var I=N?D?d:f:D?keysIn:w,j=B?void 0:I(e);return i(j||e,(function(r,i){j&&(r=e[i=r]),a(M,i,t(r,n,T,i,e,A))})),M}},function(t,e,n){(function(e){var n="object"==typeof e&&e&&e.Object===Object&&e;t.exports=n}).call(this,n(211))},function(t,e){var n=Function.prototype.toString;t.exports=function(t){if(null!=t){try{return n.call(t)}catch(t){}try{return t+""}catch(t){}}return""}},function(t,e,n){var r=n(33),i=function(){try{var t=r(Object,"defineProperty");return t({},"",{}),t}catch(t){}}();t.exports=i},function(t,e,n){var r=n(230),i=n(47),a=n(5),o=n(39),s=n(60),c=n(48),u=Object.prototype.hasOwnProperty;t.exports=function(t,e){var n=a(t),l=!n&&i(t),h=!n&&!l&&o(t),f=!n&&!l&&!h&&c(t),d=n||l||h||f,p=d?r(t.length,String):[],g=p.length;for(var y in t)!e&&!u.call(t,y)||d&&("length"==y||h&&("offset"==y||"parent"==y)||f&&("buffer"==y||"byteLength"==y||"byteOffset"==y)||s(y,g))||p.push(y);return p}},function(t,e){t.exports=function(t,e){return function(n){return t(e(n))}}},function(t,e,n){(function(t){var r=n(16),i=e&&!e.nodeType&&e,a=i&&"object"==typeof t&&t&&!t.nodeType&&t,o=a&&a.exports===i?r.Buffer:void 0,s=o?o.allocUnsafe:void 0;t.exports=function(t,e){if(e)return t.slice();var n=t.length,r=s?s(n):new t.constructor(n);return t.copy(r),r}}).call(this,n(7)(t))},function(t,e){t.exports=function(t,e){var n=-1,r=t.length;for(e||(e=Array(r));++nl))return!1;var f=c.get(t);if(f&&c.get(e))return f==e;var d=-1,p=!0,g=2&n?new r:void 
0;for(c.set(t,e),c.set(e,t);++d0&&(a=c.removeMin(),(o=s[a]).distance!==Number.POSITIVE_INFINITY);)r(a).forEach(u);return s}(t,String(e),n||a,r||function(e){return t.outEdges(e)})};var a=r.constant(1)},function(t,e,n){var r=n(10);function i(){this._arr=[],this._keyIndices={}}t.exports=i,i.prototype.size=function(){return this._arr.length},i.prototype.keys=function(){return this._arr.map((function(t){return t.key}))},i.prototype.has=function(t){return r.has(this._keyIndices,t)},i.prototype.priority=function(t){var e=this._keyIndices[t];if(void 0!==e)return this._arr[e].priority},i.prototype.min=function(){if(0===this.size())throw new Error("Queue underflow");return this._arr[0].key},i.prototype.add=function(t,e){var n=this._keyIndices;if(t=String(t),!r.has(n,t)){var i=this._arr,a=i.length;return n[t]=a,i.push({key:t,priority:e}),this._decrease(a),!0}return!1},i.prototype.removeMin=function(){this._swap(0,this._arr.length-1);var t=this._arr.pop();return delete this._keyIndices[t.key],this._heapify(0),t.key},i.prototype.decrease=function(t,e){var n=this._keyIndices[t];if(e>this._arr[n].priority)throw new Error("New priority is greater than current priority. 
Key: "+t+" Old: "+this._arr[n].priority+" New: "+e);this._arr[n].priority=e,this._decrease(n)},i.prototype._heapify=function(t){var e=this._arr,n=2*t,r=n+1,i=t;n>1].priority2?e[2]:void 0;for(u&&a(e[0],e[1],u)&&(r=1);++n1&&o.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o);return{x:i+n,y:a+r}}},function(t,e,n){t.exports=function t(e){"use strict";var n=/^\0+/g,r=/[\0\r\f]/g,i=/: */g,a=/zoo|gra/,o=/([,: ])(transform)/g,s=/,+\s*(?![^(]*[)])/g,c=/ +\s*(?![^(]*[)])/g,u=/ *[\0] */g,l=/,\r+?/g,h=/([\t\r\n ])*\f?&/g,f=/:global\(((?:[^\(\)\[\]]*|\[.*\]|\([^\(\)]*\))*)\)/g,d=/\W+/g,p=/@(k\w+)\s*(\S*)\s*/,g=/::(place)/g,y=/:(read-only)/g,v=/\s+(?=[{\];=:>])/g,m=/([[}=:>])\s+/g,b=/(\{[^{]+?);(?=\})/g,x=/\s{2,}/g,_=/([^\(])(:+) */g,k=/[svh]\w+-[tblr]{2}/,w=/\(\s*(.*)\s*\)/g,E=/([\s\S]*?);/g,T=/-self|flex-/g,C=/[^]*?(:[rp][el]a[\w-]+)[^]*/,S=/stretch|:\s*\w+\-(?:conte|avail)/,A=/([^-])(image-set\()/,M="-webkit-",O="-moz-",D="-ms-",N=1,B=1,L=0,F=1,P=1,I=1,j=0,R=0,Y=0,z=[],U=[],$=0,W=null,V=0,H=1,G="",q="",X="";function Z(t,e,i,a,o){for(var s,c,l=0,h=0,f=0,d=0,v=0,m=0,b=0,x=0,k=0,E=0,T=0,C=0,S=0,A=0,O=0,D=0,j=0,U=0,W=0,Q=i.length,it=Q-1,at="",ot="",st="",ct="",ut="",lt="";O0&&(ot=ot.replace(r,"")),ot.trim().length>0)){switch(b){case 32:case 9:case 59:case 13:case 10:break;default:ot+=i.charAt(O)}b=59}if(1===j)switch(b){case 123:case 125:case 59:case 34:case 39:case 40:case 41:case 44:j=0;case 9:case 13:case 10:case 32:break;default:for(j=0,W=O,v=b,O--,b=59;W0&&(++O,b=v);case 123:W=Q}}switch(b){case 123:for(v=(ot=ot.trim()).charCodeAt(0),T=1,W=++O;O0&&(ot=ot.replace(r,"")),m=ot.charCodeAt(1)){case 100:case 109:case 115:case 45:s=e;break;default:s=z}if(W=(st=Z(e,s,st,m,o+1)).length,Y>0&&0===W&&(W=ot.length),$>0&&(c=nt(3,st,s=J(z,ot,U),e,B,N,W,m,o,a),ot=s.join(""),void 0!==c&&0===(W=(st=c.trim()).length)&&(m=0,st="")),W>0)switch(m){case 
115:ot=ot.replace(w,et);case 100:case 109:case 45:st=ot+"{"+st+"}";break;case 107:st=(ot=ot.replace(p,"$1 $2"+(H>0?G:"")))+"{"+st+"}",st=1===P||2===P&&tt("@"+st,3)?"@"+M+st+"@"+st:"@"+st;break;default:st=ot+st,112===a&&(ct+=st,st="")}else st="";break;default:st=Z(e,J(e,ot,U),st,a,o+1)}ut+=st,C=0,j=0,A=0,D=0,U=0,S=0,ot="",st="",b=i.charCodeAt(++O);break;case 125:case 59:if((W=(ot=(D>0?ot.replace(r,""):ot).trim()).length)>1)switch(0===A&&(45===(v=ot.charCodeAt(0))||v>96&&v<123)&&(W=(ot=ot.replace(" ",":")).length),$>0&&void 0!==(c=nt(1,ot,e,t,B,N,ct.length,a,o,a))&&0===(W=(ot=c.trim()).length)&&(ot="\0\0"),v=ot.charCodeAt(0),m=ot.charCodeAt(1),v){case 0:break;case 64:if(105===m||99===m){lt+=ot+i.charAt(O);break}default:if(58===ot.charCodeAt(W-1))break;ct+=K(ot,v,m,ot.charCodeAt(2))}C=0,j=0,A=0,D=0,U=0,ot="",b=i.charCodeAt(++O)}}switch(b){case 13:case 10:if(h+d+f+l+R===0)switch(E){case 41:case 39:case 34:case 64:case 126:case 62:case 42:case 43:case 47:case 45:case 58:case 44:case 59:case 123:case 125:break;default:A>0&&(j=1)}47===h?h=0:F+C===0&&107!==a&&ot.length>0&&(D=1,ot+="\0"),$*V>0&&nt(0,ot,e,t,B,N,ct.length,a,o,a),N=1,B++;break;case 59:case 125:if(h+d+f+l===0){N++;break}default:switch(N++,at=i.charAt(O),b){case 9:case 32:if(d+l+h===0)switch(x){case 44:case 58:case 9:case 32:at="";break;default:32!==b&&(at=" ")}break;case 0:at="\\0";break;case 12:at="\\f";break;case 11:at="\\v";break;case 38:d+h+l===0&&F>0&&(U=1,D=1,at="\f"+at);break;case 108:if(d+h+l+L===0&&A>0)switch(O-A){case 2:112===x&&58===i.charCodeAt(O-3)&&(L=x);case 8:111===k&&(L=k)}break;case 58:d+h+l===0&&(A=O);break;case 44:h+f+d+l===0&&(D=1,at+="\r");break;case 34:case 39:0===h&&(d=d===b?0:0===d?b:d);break;case 91:d+h+f===0&&l++;break;case 93:d+h+f===0&&l--;break;case 41:d+h+l===0&&f--;break;case 40:if(d+h+l===0){if(0===C)switch(2*x+3*k){case 533:break;default:T=0,C=1}f++}break;case 64:h+f+d+l+A+S===0&&(S=1);break;case 42:case 47:if(d+l+f>0)break;switch(h){case 0:switch(2*b+3*i.charCodeAt(O+1)){case 
235:h=47;break;case 220:W=O,h=42}break;case 42:47===b&&42===x&&W+2!==O&&(33===i.charCodeAt(W+2)&&(ct+=i.substring(W,O+1)),at="",h=0)}}if(0===h){if(F+d+l+S===0&&107!==a&&59!==b)switch(b){case 44:case 126:case 62:case 43:case 41:case 40:if(0===C){switch(x){case 9:case 32:case 10:case 13:at+="\0";break;default:at="\0"+at+(44===b?"":"\0")}D=1}else switch(b){case 40:A+7===O&&108===x&&(A=0),C=++T;break;case 41:0==(C=--T)&&(D=1,at+="\0")}break;case 9:case 32:switch(x){case 0:case 123:case 125:case 59:case 44:case 12:case 9:case 32:case 10:case 13:break;default:0===C&&(D=1,at+="\0")}}ot+=at,32!==b&&9!==b&&(E=b)}}k=x,x=b,O++}if(W=ct.length,Y>0&&0===W&&0===ut.length&&0===e[0].length==0&&(109!==a||1===e.length&&(F>0?q:X)===e[0])&&(W=e.join(",").length+2),W>0){if(s=0===F&&107!==a?function(t){for(var e,n,i=0,a=t.length,o=Array(a);i1)){if(f=c.charCodeAt(c.length-1),d=n.charCodeAt(0),e="",0!==l)switch(f){case 42:case 126:case 62:case 43:case 32:case 40:break;default:e=" "}switch(d){case 38:n=e+q;case 126:case 62:case 43:case 32:case 41:case 40:break;case 91:n=e+n+q;break;case 58:switch(2*n.charCodeAt(1)+3*n.charCodeAt(2)){case 530:if(I>0){n=e+n.substring(8,h-1);break}default:(l<1||s[l-1].length<1)&&(n=e+q+n)}break;case 44:e="";default:n=h>1&&n.indexOf(":")>0?e+n.replace(_,"$1"+q+"$2"):e+n+q}c+=n}o[i]=c.replace(r,"").trim()}return o}(e):e,$>0&&void 0!==(c=nt(2,ct,s,t,B,N,W,a,o,a))&&0===(ct=c).length)return lt+ct+ut;if(ct=s.join(",")+"{"+ct+"}",P*L!=0){switch(2!==P||tt(ct,2)||(L=0),L){case 111:ct=ct.replace(y,":-moz-$1")+ct;break;case 112:ct=ct.replace(g,"::-webkit-input-$1")+ct.replace(g,"::-moz-$1")+ct.replace(g,":-ms-input-$1")+ct}L=0}}return lt+ct+ut}function J(t,e,n){var r=e.trim().split(l),i=r,a=r.length,o=t.length;switch(o){case 0:case 1:for(var s=0,c=0===o?"":t[0]+" ";s0&&F>0)return i.replace(f,"$1").replace(h,"$1"+X);break;default:return t.trim()+i.replace(h,"$1"+t.trim())}default:if(n*F>0&&i.indexOf("\f")>0)return 
i.replace(h,(58===t.charCodeAt(0)?"":"$1")+t.trim())}return t+i}function K(t,e,n,r){var u,l=0,h=t+";",f=2*e+3*n+4*r;if(944===f)return function(t){var e=t.length,n=t.indexOf(":",9)+1,r=t.substring(0,n).trim(),i=t.substring(n,e-1).trim();switch(t.charCodeAt(9)*H){case 0:break;case 45:if(110!==t.charCodeAt(10))break;default:var a=i.split((i="",s)),o=0;for(n=0,e=a.length;o64&&h<90||h>96&&h<123||95===h||45===h&&45!==u.charCodeAt(1)))switch(isNaN(parseFloat(u))+(-1!==u.indexOf("("))){case 1:switch(u){case"infinite":case"alternate":case"backwards":case"running":case"normal":case"forwards":case"both":case"none":case"linear":case"ease":case"ease-in":case"ease-out":case"ease-in-out":case"paused":case"reverse":case"alternate-reverse":case"inherit":case"initial":case"unset":case"step-start":case"step-end":break;default:u+=G}}l[n++]=u}i+=(0===o?"":",")+l.join(" ")}}return i=r+i+";",1===P||2===P&&tt(i,1)?M+i+i:i}(h);if(0===P||2===P&&!tt(h,1))return h;switch(f){case 1015:return 97===h.charCodeAt(10)?M+h+h:h;case 951:return 116===h.charCodeAt(3)?M+h+h:h;case 963:return 110===h.charCodeAt(5)?M+h+h:h;case 1009:if(100!==h.charCodeAt(4))break;case 969:case 942:return M+h+h;case 978:return M+h+O+h+h;case 1019:case 983:return M+h+O+h+D+h+h;case 883:return 45===h.charCodeAt(8)?M+h+h:h.indexOf("image-set(",11)>0?h.replace(A,"$1-webkit-$2")+h:h;case 932:if(45===h.charCodeAt(4))switch(h.charCodeAt(5)){case 103:return M+"box-"+h.replace("-grow","")+M+h+D+h.replace("grow","positive")+h;case 115:return M+h+D+h.replace("shrink","negative")+h;case 98:return M+h+D+h.replace("basis","preferred-size")+h}return M+h+D+h+h;case 964:return M+h+D+"flex-"+h+h;case 1023:if(99!==h.charCodeAt(8))break;return u=h.substring(h.indexOf(":",15)).replace("flex-","").replace("space-between","justify"),M+"box-pack"+u+M+h+D+"flex-pack"+u+h;case 1005:return a.test(h)?h.replace(i,":"+M)+h.replace(i,":"+O)+h:h;case 1e3:switch(l=(u=h.substring(13).trim()).indexOf("-")+1,u.charCodeAt(0)+u.charCodeAt(l)){case 
226:u=h.replace(k,"tb");break;case 232:u=h.replace(k,"tb-rl");break;case 220:u=h.replace(k,"lr");break;default:return h}return M+h+D+u+h;case 1017:if(-1===h.indexOf("sticky",9))return h;case 975:switch(l=(h=t).length-10,f=(u=(33===h.charCodeAt(l)?h.substring(0,l):h).substring(t.indexOf(":",7)+1).trim()).charCodeAt(0)+(0|u.charCodeAt(7))){case 203:if(u.charCodeAt(8)<111)break;case 115:h=h.replace(u,M+u)+";"+h;break;case 207:case 102:h=h.replace(u,M+(f>102?"inline-":"")+"box")+";"+h.replace(u,M+u)+";"+h.replace(u,D+u+"box")+";"+h}return h+";";case 938:if(45===h.charCodeAt(5))switch(h.charCodeAt(6)){case 105:return u=h.replace("-items",""),M+h+M+"box-"+u+D+"flex-"+u+h;case 115:return M+h+D+"flex-item-"+h.replace(T,"")+h;default:return M+h+D+"flex-line-pack"+h.replace("align-content","").replace(T,"")+h}break;case 973:case 989:if(45!==h.charCodeAt(3)||122===h.charCodeAt(4))break;case 931:case 953:if(!0===S.test(t))return 115===(u=t.substring(t.indexOf(":")+1)).charCodeAt(0)?K(t.replace("stretch","fill-available"),e,n,r).replace(":fill-available",":stretch"):h.replace(u,M+u)+h.replace(u,O+u.replace("fill-",""))+h;break;case 962:if(h=M+h+(102===h.charCodeAt(5)?D+h:"")+h,n+r===211&&105===h.charCodeAt(13)&&h.indexOf("transform",10)>0)return h.substring(0,h.indexOf(";",27)+1).replace(o,"$1-webkit-$2")+h}return h}function tt(t,e){var n=t.indexOf(1===e?":":"{"),r=t.substring(0,3!==e?n:10),i=t.substring(n+1,t.length-1);return W(2!==e?r:r.replace(C,"$1"),i,e)}function et(t,e){var n=K(e,e.charCodeAt(0),e.charCodeAt(1),e.charCodeAt(2));return n!==e+";"?n.replace(E," or ($1)").substring(4):"("+e+")"}function nt(t,e,n,r,i,a,o,s,c,u){for(var l,h=0,f=e;h<$;++h)switch(l=U[h].call(at,t,f,n,r,i,a,o,s,c,u)){case void 0:case!1:case!0:case null:break;default:f=l}if(f!==e)return f}function rt(t,e,n,r){for(var i=e+1;i0&&(G=i.replace(d,91===a?"":"-")),a=1,1===F?X=i:q=i;var o,s=[X];$>0&&void 0!==(o=nt(-1,n,s,s,B,N,0,0,0,0))&&"string"==typeof o&&(n=o);var c=Z(z,s,n,0,0);return $>0&&void 
0!==(o=nt(-2,c,s,s,B,N,c.length,0,0,0))&&"string"!=typeof(c=o)&&(a=0),G="",X="",q="",L=0,B=1,N=1,j*a==0?c:function(t){return t.replace(r,"").replace(v,"").replace(m,"$1").replace(b,"$1").replace(x," ")}(c)}return at.use=function t(e){switch(e){case void 0:case null:$=U.length=0;break;default:if("function"==typeof e)U[$++]=e;else if("object"==typeof e)for(var n=0,r=e.length;n=255?255:t<0?0:t},g:function(t){return t>=255?255:t<0?0:t},b:function(t){return t>=255?255:t<0?0:t},h:function(t){return t%360},s:function(t){return t>=100?100:t<0?0:t},l:function(t){return t>=100?100:t<0?0:t},a:function(t){return t>=1?1:t<0?0:t}},toLinear:function(t){var e=t/255;return t>.03928?Math.pow((e+.055)/1.055,2.4):e/12.92},hue2rgb:function(t,e,n){return n<0&&(n+=1),n>1&&(n-=1),n<1/6?t+6*(e-t)*n:n<.5?e:n<2/3?t+(e-t)*(2/3-n)*6:t},hsl2rgb:function(t,e){var n=t.h,i=t.s,a=t.l;if(100===i)return 2.55*a;n/=360,i/=100;var o=(a/=100)<.5?a*(1+i):a+i-a*i,s=2*a-o;switch(e){case"r":return 255*r.hue2rgb(s,o,n+1/3);case"g":return 255*r.hue2rgb(s,o,n);case"b":return 255*r.hue2rgb(s,o,n-1/3)}},rgb2hsl:function(t,e){var n=t.r,r=t.g,i=t.b;n/=255,r/=255,i/=255;var a=Math.max(n,r,i),o=Math.min(n,r,i),s=(a+o)/2;if("l"===e)return 100*s;if(a===o)return 0;var c=a-o;if("s"===e)return 100*(s>.5?c/(2-a-o):c/(a+o));switch(a){case n:return 60*((r-i)/c+(r1?e:"0"+e},dec2hex:function(t){var e=Math.round(t).toString(16);return e.length>1?e:"0"+e}};e.default=r},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(75),a=n(177),o=function(){function t(t,e){this.color=e,this.changed=!1,this.data=t,this.type=new a.default}return t.prototype.set=function(t,e){return this.color=e,this.changed=!1,this.data=t,this.type.type=i.TYPE.ALL,this},t.prototype._ensureHSL=function(){void 0===this.data.h&&(this.data.h=r.default.channel.rgb2hsl(this.data,"h")),void 0===this.data.s&&(this.data.s=r.default.channel.rgb2hsl(this.data,"s")),void 
0===this.data.l&&(this.data.l=r.default.channel.rgb2hsl(this.data,"l"))},t.prototype._ensureRGB=function(){void 0===this.data.r&&(this.data.r=r.default.channel.hsl2rgb(this.data,"r")),void 0===this.data.g&&(this.data.g=r.default.channel.hsl2rgb(this.data,"g")),void 0===this.data.b&&(this.data.b=r.default.channel.hsl2rgb(this.data,"b"))},Object.defineProperty(t.prototype,"r",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.r?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"r")):this.data.r},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.r=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"g",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.g?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"g")):this.data.g},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.g=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"b",{get:function(){return this.type.is(i.TYPE.HSL)||void 0===this.data.b?(this._ensureHSL(),r.default.channel.hsl2rgb(this.data,"b")):this.data.b},set:function(t){this.type.set(i.TYPE.RGB),this.changed=!0,this.data.b=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"h",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.h?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"h")):this.data.h},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.h=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"s",{get:function(){return this.type.is(i.TYPE.RGB)||void 0===this.data.s?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"s")):this.data.s},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.s=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"l",{get:function(){return this.type.is(i.TYPE.RGB)||void 
0===this.data.l?(this._ensureRGB(),r.default.channel.rgb2hsl(this.data,"l")):this.data.l},set:function(t){this.type.set(i.TYPE.HSL),this.changed=!0,this.data.l=t},enumerable:!0,configurable:!0}),Object.defineProperty(t.prototype,"a",{get:function(){return this.data.a},set:function(t){this.changed=!0,this.data.a=t},enumerable:!0,configurable:!0}),t}();e.default=o},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(75),i=function(){function t(){this.type=r.TYPE.ALL}return t.prototype.get=function(){return this.type},t.prototype.set=function(t){if(this.type&&this.type!==t)throw new Error("Cannot change both RGB and HSL channels at the same time");this.type=t},t.prototype.reset=function(){this.type=r.TYPE.ALL},t.prototype.is=function(t){return this.type===t},t}();e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i={};e.DEC2HEX=i;for(var a=0;a<=255;a++)i[a]=r.default.unit.dec2hex(a)},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var 
r=n(99),i={colors:{aliceblue:"#f0f8ff",antiquewhite:"#faebd7",aqua:"#00ffff",aquamarine:"#7fffd4",azure:"#f0ffff",beige:"#f5f5dc",bisque:"#ffe4c4",black:"#000000",blanchedalmond:"#ffebcd",blue:"#0000ff",blueviolet:"#8a2be2",brown:"#a52a2a",burlywood:"#deb887",cadetblue:"#5f9ea0",chartreuse:"#7fff00",chocolate:"#d2691e",coral:"#ff7f50",cornflowerblue:"#6495ed",cornsilk:"#fff8dc",crimson:"#dc143c",cyanaqua:"#00ffff",darkblue:"#00008b",darkcyan:"#008b8b",darkgoldenrod:"#b8860b",darkgray:"#a9a9a9",darkgreen:"#006400",darkgrey:"#a9a9a9",darkkhaki:"#bdb76b",darkmagenta:"#8b008b",darkolivegreen:"#556b2f",darkorange:"#ff8c00",darkorchid:"#9932cc",darkred:"#8b0000",darksalmon:"#e9967a",darkseagreen:"#8fbc8f",darkslateblue:"#483d8b",darkslategray:"#2f4f4f",darkslategrey:"#2f4f4f",darkturquoise:"#00ced1",darkviolet:"#9400d3",deeppink:"#ff1493",deepskyblue:"#00bfff",dimgray:"#696969",dimgrey:"#696969",dodgerblue:"#1e90ff",firebrick:"#b22222",floralwhite:"#fffaf0",forestgreen:"#228b22",fuchsia:"#ff00ff",gainsboro:"#dcdcdc",ghostwhite:"#f8f8ff",gold:"#ffd700",goldenrod:"#daa520",gray:"#808080",green:"#008000",greenyellow:"#adff2f",grey:"#808080",honeydew:"#f0fff0",hotpink:"#ff69b4",indianred:"#cd5c5c",indigo:"#4b0082",ivory:"#fffff0",khaki:"#f0e68c",lavender:"#e6e6fa",lavenderblush:"#fff0f5",lawngreen:"#7cfc00",lemonchiffon:"#fffacd",lightblue:"#add8e6",lightcoral:"#f08080",lightcyan:"#e0ffff",lightgoldenrodyellow:"#fafad2",lightgray:"#d3d3d3",lightgreen:"#90ee90",lightgrey:"#d3d3d3",lightpink:"#ffb6c1",lightsalmon:"#ffa07a",lightseagreen:"#20b2aa",lightskyblue:"#87cefa",lightslategray:"#778899",lightslategrey:"#778899",lightsteelblue:"#b0c4de",lightyellow:"#ffffe0",lime:"#00ff00",limegreen:"#32cd32",linen:"#faf0e6",magenta:"#ff00ff",maroon:"#800000",mediumaquamarine:"#66cdaa",mediumblue:"#0000cd",mediumorchid:"#ba55d3",mediumpurple:"#9370db",mediumseagreen:"#3cb371",mediumslateblue:"#7b68ee",mediumspringgreen:"#00fa9a",mediumturquoise:"#48d1cc",mediumvioletred:"#c71585",midnight
blue:"#191970",mintcream:"#f5fffa",mistyrose:"#ffe4e1",moccasin:"#ffe4b5",navajowhite:"#ffdead",navy:"#000080",oldlace:"#fdf5e6",olive:"#808000",olivedrab:"#6b8e23",orange:"#ffa500",orangered:"#ff4500",orchid:"#da70d6",palegoldenrod:"#eee8aa",palegreen:"#98fb98",paleturquoise:"#afeeee",palevioletred:"#db7093",papayawhip:"#ffefd5",peachpuff:"#ffdab9",peru:"#cd853f",pink:"#ffc0cb",plum:"#dda0dd",powderblue:"#b0e0e6",purple:"#800080",rebeccapurple:"#663399",red:"#ff0000",rosybrown:"#bc8f8f",royalblue:"#4169e1",saddlebrown:"#8b4513",salmon:"#fa8072",sandybrown:"#f4a460",seagreen:"#2e8b57",seashell:"#fff5ee",sienna:"#a0522d",silver:"#c0c0c0",skyblue:"#87ceeb",slateblue:"#6a5acd",slategray:"#708090",slategrey:"#708090",snow:"#fffafa",springgreen:"#00ff7f",tan:"#d2b48c",teal:"#008080",thistle:"#d8bfd8",transparent:"#00000000",turquoise:"#40e0d0",violet:"#ee82ee",wheat:"#f5deb3",white:"#ffffff",whitesmoke:"#f5f5f5",yellow:"#ffff00",yellowgreen:"#9acd32"},parse:function(t){t=t.toLowerCase();var e=i.colors[t];if(e)return r.default.parse(e)},stringify:function(t){var e=r.default.stringify(t);for(var n in i.colors)if(i.colors[n]===e)return n}};e.default=i},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^rgba?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?))(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e\d+)?(%?)))?\s*?\)$/i,parse:function(t){var e=t.charCodeAt(0);if(114===e||82===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5],h=n[6],f=n[7],d=n[8];return i.default.set({r:r.default.channel.clamp.r(s?2.55*parseFloat(o):parseFloat(o)),g:r.default.channel.clamp.g(u?2.55*parseFloat(c):parseFloat(c)),b:r.default.channel.clamp.b(h?2.55*parseFloat(l):parseFloat(l)),a:f?r.default.channel.clamp.a(d?parseFloat(f)/100:parseFloat(f)):1},t)}}},stringify:function(t){return 
t.a<1?"rgba("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+", "+r.default.lang.round(t.a)+")":"rgb("+r.default.lang.round(t.r)+", "+r.default.lang.round(t.g)+", "+r.default.lang.round(t.b)+")"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(45),a={re:/^hsla?\(\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(?:deg|grad|rad|turn)?)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)\s*?(?:,|\s)\s*?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?%)(?:\s*?(?:,|\/)\s*?\+?(-?(?:\d+(?:\.\d+)?|(?:\.\d+))(?:e-?\d+)?(%)?))?\s*?\)$/i,hueRe:/^(.+?)(deg|grad|rad|turn)$/i,_hue2deg:function(t){var e=t.match(a.hueRe);if(e){var n=e[1];switch(e[2]){case"grad":return r.default.channel.clamp.h(.9*parseFloat(n));case"rad":return r.default.channel.clamp.h(180*parseFloat(n)/Math.PI);case"turn":return r.default.channel.clamp.h(360*parseFloat(n))}}return r.default.channel.clamp.h(parseFloat(t))},parse:function(t){var e=t.charCodeAt(0);if(104===e||72===e){var n=t.match(a.re);if(n){var o=n[1],s=n[2],c=n[3],u=n[4],l=n[5];return i.default.set({h:a._hue2deg(o),s:r.default.channel.clamp.s(parseFloat(s)),l:r.default.channel.clamp.l(parseFloat(c)),a:u?r.default.channel.clamp.a(l?parseFloat(u)/100:parseFloat(u)):1},t)}}},stringify:function(t){return t.a<1?"hsla("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%, "+t.a+")":"hsl("+r.default.lang.round(t.h)+", "+r.default.lang.round(t.s)+"%, "+r.default.lang.round(t.l)+"%)"}};e.default=a},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"r")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"g")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return 
r.default(t,"b")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"h")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"s")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(29);e.default=function(t){return r.default(t,"l")}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(103);e.default=function(t){return!r.default(t)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15);e.default=function(t){try{return r.default.parse(t),!0}catch(t){return!1}}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"s",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t,e){return r.default(t,"l",-e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(32);e.default=function(t){return r.default(t,"h",180)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(52);e.default=function(t){return r.default(t,{s:0})}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(15),i=n(107);e.default=function(t,e){void 0===e&&(e=100);var n=r.default.parse(t);return n.r=255-n.r,n.g=255-n.g,n.b=255-n.b,i.default(n,t,e)}},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var r=n(9),i=n(15),a=n(106);e.default=function(t,e){var n,o,s,c=i.default.parse(t),u={};for(var l in 
e)u[l]=(n=c[l],o=e[l],s=r.default.channel.max[l],o>0?(s-n)*o/100:n*o/100);return a.default(t,u)}},function(t,e,n){t.exports={Graph:n(76),version:n(300)}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,4)}},function(t,e){t.exports=function(){this.__data__=[],this.size=0}},function(t,e,n){var r=n(55),i=Array.prototype.splice;t.exports=function(t){var e=this.__data__,n=r(e,t);return!(n<0)&&(n==e.length-1?e.pop():i.call(e,n,1),--this.size,!0)}},function(t,e,n){var r=n(55);t.exports=function(t){var e=this.__data__,n=r(e,t);return n<0?void 0:e[n][1]}},function(t,e,n){var r=n(55);t.exports=function(t){return r(this.__data__,t)>-1}},function(t,e,n){var r=n(55);t.exports=function(t,e){var n=this.__data__,i=r(n,t);return i<0?(++this.size,n.push([t,e])):n[i][1]=e,this}},function(t,e,n){var r=n(54);t.exports=function(){this.__data__=new r,this.size=0}},function(t,e){t.exports=function(t){var e=this.__data__,n=e.delete(t);return this.size=e.size,n}},function(t,e){t.exports=function(t){return this.__data__.get(t)}},function(t,e){t.exports=function(t){return this.__data__.has(t)}},function(t,e,n){var r=n(54),i=n(77),a=n(78);t.exports=function(t,e){var n=this.__data__;if(n instanceof r){var o=n.__data__;if(!i||o.length<199)return o.push([t,e]),this.size=++n.size,this;n=this.__data__=new a(o)}return n.set(t,e),this.size=n.size,this}},function(t,e,n){var r=n(37),i=n(214),a=n(11),o=n(110),s=/^\[object .+?Constructor\]$/,c=Function.prototype,u=Object.prototype,l=c.toString,h=u.hasOwnProperty,f=RegExp("^"+l.call(h).replace(/[\\^$.*+?()[\]{}|]/g,"\\$&").replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g,"$1.*?")+"$");t.exports=function(t){return!(!a(t)||i(t))&&(r(t)?f:s).test(o(t))}},function(t,e){var n;n=function(){return this}();try{n=n||new Function("return this")()}catch(t){"object"==typeof window&&(n=window)}t.exports=n},function(t,e,n){var r=n(38),i=Object.prototype,a=i.hasOwnProperty,o=i.toString,s=r?r.toStringTag:void 0;t.exports=function(t){var 
e=a.call(t,s),n=t[s];try{t[s]=void 0;var r=!0}catch(t){}var i=o.call(t);return r&&(e?t[s]=n:delete t[s]),i}},function(t,e){var n=Object.prototype.toString;t.exports=function(t){return n.call(t)}},function(t,e,n){var r,i=n(215),a=(r=/[^.]+$/.exec(i&&i.keys&&i.keys.IE_PROTO||""))?"Symbol(src)_1."+r:"";t.exports=function(t){return!!a&&a in t}},function(t,e,n){var r=n(16)["__core-js_shared__"];t.exports=r},function(t,e){t.exports=function(t,e){return null==t?void 0:t[e]}},function(t,e,n){var r=n(218),i=n(54),a=n(77);t.exports=function(){this.size=0,this.__data__={hash:new r,map:new(a||i),string:new r}}},function(t,e,n){var r=n(219),i=n(220),a=n(221),o=n(222),s=n(223);function c(t){var e=-1,n=null==t?0:t.length;for(this.clear();++e0){if(++e>=800)return arguments[0]}else e=0;return t.apply(void 0,arguments)}}},function(t,e,n){var r=n(131),i=n(292),a=n(296),o=n(132),s=n(297),c=n(90);t.exports=function(t,e,n){var u=-1,l=i,h=t.length,f=!0,d=[],p=d;if(n)f=!1,l=a;else if(h>=200){var g=e?null:s(t);if(g)return c(g);f=!1,l=o,p=new r}else p=e?[]:d;t:for(;++u-1}},function(t,e,n){var r=n(145),i=n(294),a=n(295);t.exports=function(t,e,n){return e==e?a(t,e,n):r(t,i,n)}},function(t,e){t.exports=function(t){return t!=t}},function(t,e){t.exports=function(t,e,n){for(var r=n-1,i=t.length;++r1||1===e.length&&t.hasEdge(e[0],e[0])}))}},function(t,e,n){var r=n(10);t.exports=function(t,e,n){return function(t,e,n){var r={},i=t.nodes();return i.forEach((function(t){r[t]={},r[t][t]={distance:0},i.forEach((function(e){t!==e&&(r[t][e]={distance:Number.POSITIVE_INFINITY})})),n(t).forEach((function(n){var i=n.v===t?n.w:n.v,a=e(n);r[t][i]={distance:a,predecessor:t}}))})),i.forEach((function(t){var e=r[t];i.forEach((function(n){var a=r[n];i.forEach((function(n){var r=a[t],i=e[n],o=a[n],s=r.distance+i.distance;s0;){if(n=c.removeMin(),r.has(s,n))o.setEdge(n,s[n]);else{if(l)throw new Error("Input graph is not connected: "+t);l=!0}t.nodeEdges(n).forEach(u)}return o}},function(t,e,n){var 
r;try{r=n(3)}catch(t){}r||(r=window.graphlib),t.exports=r},function(t,e,n){"use strict";var r=n(4),i=n(345),a=n(348),o=n(349),s=n(8).normalizeRanks,c=n(351),u=n(8).removeEmptyRanks,l=n(352),h=n(353),f=n(354),d=n(355),p=n(364),g=n(8),y=n(17).Graph;t.exports=function(t,e){var n=e&&e.debugTiming?g.time:g.notime;n("layout",(function(){var e=n(" buildLayoutGraph",(function(){return function(t){var e=new y({multigraph:!0,compound:!0}),n=C(t.graph());return e.setGraph(r.merge({},m,T(n,v),r.pick(n,b))),r.forEach(t.nodes(),(function(n){var i=C(t.node(n));e.setNode(n,r.defaults(T(i,x),_)),e.setParent(n,t.parent(n))})),r.forEach(t.edges(),(function(n){var i=C(t.edge(n));e.setEdge(n,r.merge({},w,T(i,k),r.pick(i,E)))})),e}(t)}));n(" runLayout",(function(){!function(t,e){e(" makeSpaceForEdgeLabels",(function(){!function(t){var e=t.graph();e.ranksep/=2,r.forEach(t.edges(),(function(n){var r=t.edge(n);r.minlen*=2,"c"!==r.labelpos.toLowerCase()&&("TB"===e.rankdir||"BT"===e.rankdir?r.width+=r.labeloffset:r.height+=r.labeloffset)}))}(t)})),e(" removeSelfEdges",(function(){!function(t){r.forEach(t.edges(),(function(e){if(e.v===e.w){var n=t.node(e.v);n.selfEdges||(n.selfEdges=[]),n.selfEdges.push({e:e,label:t.edge(e)}),t.removeEdge(e)}}))}(t)})),e(" acyclic",(function(){i.run(t)})),e(" nestingGraph.run",(function(){l.run(t)})),e(" rank",(function(){o(g.asNonCompoundGraph(t))})),e(" injectEdgeLabelProxies",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);if(n.width&&n.height){var r=t.node(e.v),i={rank:(t.node(e.w).rank-r.rank)/2+r.rank,e:e};g.addDummyNode(t,"edge-proxy",i,"_ep")}}))}(t)})),e(" removeEmptyRanks",(function(){u(t)})),e(" nestingGraph.cleanup",(function(){l.cleanup(t)})),e(" normalizeRanks",(function(){s(t)})),e(" assignRankMinMax",(function(){!function(t){var e=0;r.forEach(t.nodes(),(function(n){var 
i=t.node(n);i.borderTop&&(i.minRank=t.node(i.borderTop).rank,i.maxRank=t.node(i.borderBottom).rank,e=r.max(e,i.maxRank))})),t.graph().maxRank=e}(t)})),e(" removeEdgeLabelProxies",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);"edge-proxy"===n.dummy&&(t.edge(n.e).labelRank=n.rank,t.removeNode(e))}))}(t)})),e(" normalize.run",(function(){a.run(t)})),e(" parentDummyChains",(function(){c(t)})),e(" addBorderSegments",(function(){h(t)})),e(" order",(function(){d(t)})),e(" insertSelfEdges",(function(){!function(t){var e=g.buildLayerMatrix(t);r.forEach(e,(function(e){var n=0;r.forEach(e,(function(e,i){var a=t.node(e);a.order=i+n,r.forEach(a.selfEdges,(function(e){g.addDummyNode(t,"selfedge",{width:e.label.width,height:e.label.height,rank:a.rank,order:i+ ++n,e:e.e,label:e.label},"_se")})),delete a.selfEdges}))}))}(t)})),e(" adjustCoordinateSystem",(function(){f.adjust(t)})),e(" position",(function(){p(t)})),e(" positionSelfEdges",(function(){!function(t){r.forEach(t.nodes(),(function(e){var n=t.node(e);if("selfedge"===n.dummy){var r=t.node(n.e.v),i=r.x+r.width/2,a=r.y,o=n.x-i,s=r.height/2;t.setEdge(n.e,n.label),t.removeNode(e),n.label.points=[{x:i+2*o/3,y:a-s},{x:i+5*o/6,y:a-s},{x:i+o,y:a},{x:i+5*o/6,y:a+s},{x:i+2*o/3,y:a+s}],n.label.x=n.x,n.label.y=n.y}}))}(t)})),e(" removeBorderNodes",(function(){!function(t){r.forEach(t.nodes(),(function(e){if(t.children(e).length){var n=t.node(e),i=t.node(n.borderTop),a=t.node(n.borderBottom),o=t.node(r.last(n.borderLeft)),s=t.node(r.last(n.borderRight));n.width=Math.abs(s.x-o.x),n.height=Math.abs(a.y-i.y),n.x=o.x+n.width/2,n.y=i.y+n.height/2}})),r.forEach(t.nodes(),(function(e){"border"===t.node(e).dummy&&t.removeNode(e)}))}(t)})),e(" normalize.undo",(function(){a.undo(t)})),e(" fixupEdgeLabelCoords",(function(){!function(t){r.forEach(t.edges(),(function(e){var 
n=t.edge(e);if(r.has(n,"x"))switch("l"!==n.labelpos&&"r"!==n.labelpos||(n.width-=n.labeloffset),n.labelpos){case"l":n.x-=n.width/2+n.labeloffset;break;case"r":n.x+=n.width/2+n.labeloffset}}))}(t)})),e(" undoCoordinateSystem",(function(){f.undo(t)})),e(" translateGraph",(function(){!function(t){var e=Number.POSITIVE_INFINITY,n=0,i=Number.POSITIVE_INFINITY,a=0,o=t.graph(),s=o.marginx||0,c=o.marginy||0;function u(t){var r=t.x,o=t.y,s=t.width,c=t.height;e=Math.min(e,r-s/2),n=Math.max(n,r+s/2),i=Math.min(i,o-c/2),a=Math.max(a,o+c/2)}r.forEach(t.nodes(),(function(e){u(t.node(e))})),r.forEach(t.edges(),(function(e){var n=t.edge(e);r.has(n,"x")&&u(n)})),e-=s,i-=c,r.forEach(t.nodes(),(function(n){var r=t.node(n);r.x-=e,r.y-=i})),r.forEach(t.edges(),(function(n){var a=t.edge(n);r.forEach(a.points,(function(t){t.x-=e,t.y-=i})),r.has(a,"x")&&(a.x-=e),r.has(a,"y")&&(a.y-=i)})),o.width=n-e+s,o.height=a-i+c}(t)})),e(" assignNodeIntersects",(function(){!function(t){r.forEach(t.edges(),(function(e){var n,r,i=t.edge(e),a=t.node(e.v),o=t.node(e.w);i.points?(n=i.points[0],r=i.points[i.points.length-1]):(i.points=[],n=o,r=a),i.points.unshift(g.intersectRect(a,n)),i.points.push(g.intersectRect(o,r))}))}(t)})),e(" reversePoints",(function(){!function(t){r.forEach(t.edges(),(function(e){var n=t.edge(e);n.reversed&&n.points.reverse()}))}(t)})),e(" acyclic.undo",(function(){i.undo(t)}))}(e,n)})),n(" updateInputGraph",(function(){!function(t,e){r.forEach(t.nodes(),(function(n){var r=t.node(n),i=e.node(n);r&&(r.x=i.x,r.y=i.y,e.children(n).length&&(r.width=i.width,r.height=i.height))})),r.forEach(t.edges(),(function(n){var i=t.edge(n),a=e.edge(n);i.points=a.points,r.has(a,"x")&&(i.x=a.x,i.y=a.y)})),t.graph().width=e.graph().width,t.graph().height=e.graph().height}(t,e)}))}))};var 
v=["nodesep","edgesep","ranksep","marginx","marginy"],m={ranksep:50,edgesep:20,nodesep:50,rankdir:"tb"},b=["acyclicer","ranker","rankdir","align"],x=["width","height"],_={width:0,height:0},k=["minlen","weight","width","height","labeloffset"],w={minlen:1,weight:1,width:0,height:0,labeloffset:10,labelpos:"r"},E=["labelpos"];function T(t,e){return r.mapValues(r.pick(t,e),Number)}function C(t){var e={};return r.forEach(t,(function(t,n){e[n.toLowerCase()]=t})),e}},function(t,e,n){var r=n(108);t.exports=function(t){return r(t,5)}},function(t,e,n){var r=n(315)(n(316));t.exports=r},function(t,e,n){var r=n(25),i=n(24),a=n(30);t.exports=function(t){return function(e,n,o){var s=Object(e);if(!i(e)){var c=r(n,3);e=a(e),n=function(t){return c(s[t],t,s)}}var u=t(e,n,o);return u>-1?s[c?e[u]:u]:void 0}}},function(t,e,n){var r=n(145),i=n(25),a=n(317),o=Math.max;t.exports=function(t,e,n){var s=null==t?0:t.length;if(!s)return-1;var c=null==n?0:a(n);return c<0&&(c=o(s+c,0)),r(t,i(e,3),c)}},function(t,e,n){var r=n(155);t.exports=function(t){var e=r(t),n=e%1;return e==e?n?e-n:e:0}},function(t,e,n){var r=n(11),i=n(42),a=/^\s+|\s+$/g,o=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,c=/^0o[0-7]+$/i,u=parseInt;t.exports=function(t){if("number"==typeof t)return t;if(i(t))return NaN;if(r(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=r(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(a,"");var n=s.test(t);return n||c.test(t)?u(t.slice(2),n?2:8):o.test(t)?NaN:+t}},function(t,e,n){var r=n(89),i=n(127),a=n(40);t.exports=function(t,e){return null==t?t:r(t,i(e),a)}},function(t,e){t.exports=function(t){var e=null==t?0:t.length;return e?t[e-1]:void 0}},function(t,e,n){var r=n(59),i=n(88),a=n(25);t.exports=function(t,e){var n={};return e=a(e,3),i(t,(function(t,i,a){r(n,i,e(t,i,a))})),n}},function(t,e,n){var r=n(95),i=n(323),a=n(35);t.exports=function(t){return t&&t.length?r(t,a,i):void 0}},function(t,e){t.exports=function(t,e){return t>e}},function(t,e,n){var 
r=n(325),i=n(328)((function(t,e,n){r(t,e,n)}));t.exports=i},function(t,e,n){var r=n(53),i=n(157),a=n(89),o=n(326),s=n(11),c=n(40),u=n(159);t.exports=function t(e,n,l,h,f){e!==n&&a(n,(function(a,c){if(f||(f=new r),s(a))o(e,n,c,l,t,h,f);else{var d=h?h(u(e,c),a,c+"",e,n,f):void 0;void 0===d&&(d=a),i(e,c,d)}}),c)}},function(t,e,n){var r=n(157),i=n(114),a=n(123),o=n(115),s=n(124),c=n(47),u=n(5),l=n(146),h=n(39),f=n(37),d=n(11),p=n(158),g=n(48),y=n(159),v=n(327);t.exports=function(t,e,n,m,b,x,_){var k=y(t,n),w=y(e,n),E=_.get(w);if(E)r(t,n,E);else{var T=x?x(k,w,n+"",t,e,_):void 0,C=void 0===T;if(C){var S=u(w),A=!S&&h(w),M=!S&&!A&&g(w);T=w,S||A||M?u(k)?T=k:l(k)?T=o(k):A?(C=!1,T=i(w,!0)):M?(C=!1,T=a(w,!0)):T=[]:p(w)||c(w)?(T=k,c(k)?T=v(k):d(k)&&!f(k)||(T=s(w))):C=!1}C&&(_.set(w,T),b(T,w,m,x,_),_.delete(w)),r(t,n,T)}}},function(t,e,n){var r=n(46),i=n(40);t.exports=function(t){return r(t,i(t))}},function(t,e,n){var r=n(67),i=n(68);t.exports=function(t){return r((function(e,n){var r=-1,a=n.length,o=a>1?n[a-1]:void 0,s=a>2?n[2]:void 0;for(o=t.length>3&&"function"==typeof o?(a--,o):void 0,s&&i(n[0],n[1],s)&&(o=a<3?void 0:o,a=1),e=Object(e);++r1&&o(t,e[0],e[1])?e=[]:n>2&&o(e[0],e[1],e[2])&&(e=[e[0]]),i(t,r(e,1),[])}));t.exports=s},function(t,e,n){var r=n(66),i=n(25),a=n(141),o=n(340),s=n(61),c=n(341),u=n(35);t.exports=function(t,e,n){var l=-1;e=r(e.length?e:[u],s(i));var h=a(t,(function(t,n,i){return{criteria:r(e,(function(e){return e(t)})),index:++l,value:t}}));return o(h,(function(t,e){return c(t,e,n)}))}},function(t,e){t.exports=function(t,e){var n=t.length;for(t.sort(e);n--;)t[n]=t[n].value;return t}},function(t,e,n){var r=n(342);t.exports=function(t,e,n){for(var i=-1,a=t.criteria,o=e.criteria,s=a.length,c=n.length;++i=c?u:u*("desc"==n[i]?-1:1)}return t.index-e.index}},function(t,e,n){var r=n(42);t.exports=function(t,e){if(t!==e){var n=void 0!==t,i=null===t,a=t==t,o=r(t),s=void 0!==e,c=null===e,u=e==e,l=r(e);if(!c&&!l&&!o&&t>e||o&&s&&u&&!c&&!l||i&&s&&u||!n&&u||!a)return 
1;if(!i&&!o&&!l&&t0;--c)if(r=e[c].dequeue()){i=i.concat(s(t,e,n,r,!0));break}}return i}(n.graph,n.buckets,n.zeroIdx);return r.flatten(r.map(u,(function(e){return t.outEdges(e.v,e.w)})),!0)};var o=r.constant(1);function s(t,e,n,i,a){var o=a?[]:void 0;return r.forEach(t.inEdges(i.v),(function(r){var i=t.edge(r),s=t.node(r.v);a&&o.push({v:r.v,w:r.w}),s.out-=i,c(e,n,s)})),r.forEach(t.outEdges(i.v),(function(r){var i=t.edge(r),a=r.w,o=t.node(a);o.in-=i,c(e,n,o)})),t.removeNode(i.v),o}function c(t,e,n){n.out?n.in?t[n.out-n.in+e].enqueue(n):t[t.length-1].enqueue(n):t[0].enqueue(n)}},function(t,e){function n(){var t={};t._next=t._prev=t,this._sentinel=t}function r(t){t._prev._next=t._next,t._next._prev=t._prev,delete t._next,delete t._prev}function i(t,e){if("_next"!==t&&"_prev"!==t)return e}t.exports=n,n.prototype.dequeue=function(){var t=this._sentinel,e=t._prev;if(e!==t)return r(e),e},n.prototype.enqueue=function(t){var e=this._sentinel;t._prev&&t._next&&r(t),t._next=e._next,e._next._prev=t,e._next=t,t._prev=e},n.prototype.toString=function(){for(var t=[],e=this._sentinel,n=e._prev;n!==e;)t.push(JSON.stringify(n,i)),n=n._prev;return"["+t.join(", ")+"]"}},function(t,e,n){"use strict";var r=n(4),i=n(8);t.exports={run:function(t){t.graph().dummyChains=[],r.forEach(t.edges(),(function(e){!function(t,e){var n,r,a,o=e.v,s=t.node(o).rank,c=e.w,u=t.node(c).rank,l=e.name,h=t.edge(e),f=h.labelRank;if(u===s+1)return;for(t.removeEdge(e),a=0,++s;sc.lim&&(u=c,l=!0);var h=r.filter(e.edges(),(function(e){return l===m(t,t.node(e.v),u)&&l!==m(t,t.node(e.w),u)}));return r.minBy(h,(function(t){return a(e,t)}))}function v(t,e,n,i){var a=n.v,o=n.w;t.removeEdge(a,o),t.setEdge(i.v,i.w,{}),d(t),h(t,e),function(t,e){var n=r.find(t.nodes(),(function(t){return!e.node(t).parent})),i=s(t,n);i=i.slice(1),r.forEach(i,(function(n){var r=t.node(n).parent,i=e.edge(n,r),a=!1;i||(i=e.edge(r,n),a=!0),e.node(n).rank=e.node(r).rank+(a?i.minlen:-i.minlen)}))}(t,e)}function m(t,e,n){return 
n.low<=e.lim&&e.lim<=n.lim}t.exports=l,l.initLowLimValues=d,l.initCutValues=h,l.calcCutValue=f,l.leaveEdge=g,l.enterEdge=y,l.exchangeEdges=v},function(t,e,n){var r=n(4);t.exports=function(t){var e=function(t){var e={},n=0;function i(a){var o=n;r.forEach(t.children(a),i),e[a]={low:o,lim:n++}}return r.forEach(t.children(),i),e}(t);r.forEach(t.graph().dummyChains,(function(n){for(var r=t.node(n),i=r.edgeObj,a=function(t,e,n,r){var i,a,o=[],s=[],c=Math.min(e[n].low,e[r].low),u=Math.max(e[n].lim,e[r].lim);i=n;do{i=t.parent(i),o.push(i)}while(i&&(e[i].low>c||u>e[i].lim));a=i,i=r;for(;(i=t.parent(i))!==a;)s.push(i);return{path:o.concat(s.reverse()),lca:a}}(t,e,i.v,i.w),o=a.path,s=a.lca,c=0,u=o[c],l=!0;n!==i.w;){if(r=t.node(n),l){for(;(u=o[c])!==s&&t.node(u).maxRank=2),s=l.buildLayerMatrix(t);var y=a(t,s);y0;)e%2&&(n+=c[e+1]),c[e=e-1>>1]+=t.weight;u+=t.weight*n}))),u}t.exports=function(t,e){for(var n=0,r=1;r=t.barycenter)&&function(t,e){var n=0,r=0;t.weight&&(n+=t.barycenter*t.weight,r+=t.weight);e.weight&&(n+=e.barycenter*e.weight,r+=e.weight);t.vs=e.vs.concat(t.vs),t.barycenter=n/r,t.weight=r,t.i=Math.min(e.i,t.i),e.merged=!0}(t,e)}}function i(e){return function(n){n.in.push(e),0==--n.indegree&&t.push(n)}}for(;t.length;){var a=t.pop();e.push(a),r.forEach(a.in.reverse(),n(a)),r.forEach(a.out,i(a))}return r.map(r.filter(e,(function(t){return!t.merged})),(function(t){return r.pick(t,["vs","i","barycenter","weight"])}))}(r.filter(n,(function(t){return!t.indegree})))}},function(t,e,n){var r=n(4),i=n(8);function a(t,e,n){for(var i;e.length&&(i=r.last(e)).i<=n;)e.pop(),t.push(i.vs),n++;return n}t.exports=function(t,e){var n=i.partition(t,(function(t){return r.has(t,"barycenter")})),o=n.lhs,s=r.sortBy(n.rhs,(function(t){return-t.i})),c=[],u=0,l=0,h=0;o.sort((f=!!e,function(t,e){return t.barycentere.barycenter?1:f?e.i-t.i:t.i-e.i})),h=a(c,s,h),r.forEach(o,(function(t){h+=t.vs.length,c.push(t.vs),u+=t.barycenter*t.weight,l+=t.weight,h=a(c,s,h)}));var f;var 
d={vs:r.flatten(c,!0)};l&&(d.barycenter=u/l,d.weight=l);return d}},function(t,e,n){var r=n(4),i=n(17).Graph;t.exports=function(t,e,n){var a=function(t){var e;for(;t.hasNode(e=r.uniqueId("_root")););return e}(t),o=new i({compound:!0}).setGraph({root:a}).setDefaultNodeLabel((function(e){return t.node(e)}));return r.forEach(t.nodes(),(function(i){var s=t.node(i),c=t.parent(i);(s.rank===e||s.minRank<=e&&e<=s.maxRank)&&(o.setNode(i),o.setParent(i,c||a),r.forEach(t[n](i),(function(e){var n=e.v===i?e.w:e.v,a=o.edge(n,i),s=r.isUndefined(a)?0:a.weight;o.setEdge(n,i,{weight:t.edge(e).weight+s})})),r.has(s,"minRank")&&o.setNode(i,{borderLeft:s.borderLeft[e],borderRight:s.borderRight[e]}))})),o}},function(t,e,n){var r=n(4);t.exports=function(t,e,n){var i,a={};r.forEach(n,(function(n){for(var r,o,s=t.parent(n);s;){if((r=t.parent(s))?(o=a[r],a[r]=s):(o=i,i=s),o&&o!==s)return void e.setEdge(o,s);s=r}}))}},function(t,e,n){"use strict";var r=n(4),i=n(8),a=n(365).positionX;t.exports=function(t){(function(t){var e=i.buildLayerMatrix(t),n=t.graph().ranksep,a=0;r.forEach(e,(function(e){var i=r.max(r.map(e,(function(e){return t.node(e).height})));r.forEach(e,(function(e){t.node(e).y=a+i/2})),a+=i+n}))})(t=i.asNonCompoundGraph(t)),r.forEach(a(t),(function(e,n){t.node(n).x=e}))}},function(t,e,n){"use strict";var r=n(4),i=n(17).Graph,a=n(8);function o(t,e){var n={};return r.reduce(e,(function(e,i){var a=0,o=0,s=e.length,u=r.last(i);return r.forEach(i,(function(e,l){var h=function(t,e){if(t.node(e).dummy)return r.find(t.predecessors(e),(function(e){return t.node(e).dummy}))}(t,e),f=h?t.node(h).order:s;(h||e===u)&&(r.forEach(i.slice(o,l+1),(function(e){r.forEach(t.predecessors(e),(function(r){var i=t.node(r),o=i.order;!(os)&&c(n,e,u)}))}))}return r.reduce(e,(function(e,n){var a,o=-1,s=0;return r.forEach(n,(function(r,c){if("border"===t.node(r).dummy){var u=t.predecessors(r);u.length&&(a=t.node(u[0]).order,i(n,s,c,o,a),s=c,o=a)}i(n,s,n.length,a,e.length)})),n})),n}function 
c(t,e,n){if(e>n){var r=e;e=n,n=r}var i=t[e];i||(t[e]=i={}),i[n]=!0}function u(t,e,n){if(e>n){var i=e;e=n,n=i}return r.has(t[e],n)}function l(t,e,n,i){var a={},o={},s={};return r.forEach(e,(function(t){r.forEach(t,(function(t,e){a[t]=t,o[t]=t,s[t]=e}))})),r.forEach(e,(function(t){var e=-1;r.forEach(t,(function(t){var c=i(t);if(c.length)for(var l=((c=r.sortBy(c,(function(t){return s[t]}))).length-1)/2,h=Math.floor(l),f=Math.ceil(l);h<=f;++h){var d=c[h];o[t]===t&&e0}t.exports=function(t,e,r,i){var a,o,s,c,u,l,h,f,d,p,g,y,v;if(a=e.y-t.y,s=t.x-e.x,u=e.x*t.y-t.x*e.y,d=a*r.x+s*r.y+u,p=a*i.x+s*i.y+u,0!==d&&0!==p&&n(d,p))return;if(o=i.y-r.y,c=r.x-i.x,l=i.x*r.y-r.x*i.y,h=o*t.x+c*t.y+l,f=o*e.x+c*e.y+l,0!==h&&0!==f&&n(h,f))return;if(0===(g=a*c-o*s))return;return y=Math.abs(g/2),{x:(v=s*l-c*u)<0?(v-y)/g:(v+y)/g,y:(v=o*u-a*l)<0?(v-y)/g:(v+y)/g}}},function(t,e,n){var r=n(43),i=n(31),a=n(153).layout;t.exports=function(){var t=n(371),e=n(374),i=n(375),u=n(376),l=n(377),h=n(378),f=n(379),d=n(380),p=n(381),g=function(n,g){!function(t){t.nodes().forEach((function(e){var n=t.node(e);r.has(n,"label")||t.children(e).length||(n.label=e),r.has(n,"paddingX")&&r.defaults(n,{paddingLeft:n.paddingX,paddingRight:n.paddingX}),r.has(n,"paddingY")&&r.defaults(n,{paddingTop:n.paddingY,paddingBottom:n.paddingY}),r.has(n,"padding")&&r.defaults(n,{paddingLeft:n.padding,paddingRight:n.padding,paddingTop:n.padding,paddingBottom:n.padding}),r.defaults(n,o),r.each(["paddingLeft","paddingRight","paddingTop","paddingBottom"],(function(t){n[t]=Number(n[t])})),r.has(n,"width")&&(n._prevWidth=n.width),r.has(n,"height")&&(n._prevHeight=n.height)})),t.edges().forEach((function(e){var n=t.edge(e);r.has(n,"label")||(n.label=""),r.defaults(n,s)}))}(g);var y=c(n,"output"),v=c(y,"clusters"),m=c(y,"edgePaths"),b=i(c(y,"edgeLabels"),g),x=t(c(y,"nodes"),g,d);a(g),l(x,g),h(b,g),u(m,g,p);var _=e(v,g);f(_,g),function(t){r.each(t.nodes(),(function(e){var n=t.node(e);r.has(n,"_prevWidth")?n.width=n._prevWidth:delete 
n.width,r.has(n,"_prevHeight")?n.height=n._prevHeight:delete n.height,delete n._prevWidth,delete n._prevHeight}))}(g)};return g.createNodes=function(e){return arguments.length?(t=e,g):t},g.createClusters=function(t){return arguments.length?(e=t,g):e},g.createEdgeLabels=function(t){return arguments.length?(i=t,g):i},g.createEdgePaths=function(t){return arguments.length?(u=t,g):u},g.shapes=function(t){return arguments.length?(d=t,g):d},g.arrows=function(t){return arguments.length?(p=t,g):p},g};var o={paddingLeft:10,paddingRight:10,paddingTop:10,paddingBottom:10,rx:0,ry:0,shape:"rect"},s={arrowhead:"normal",curve:i.curveLinear};function c(t,e){var n=t.select("g."+e);return n.empty()&&(n=t.append("g").attr("class",e)),n}},function(t,e,n){"use strict";var r=n(43),i=n(97),a=n(12),o=n(31);t.exports=function(t,e,n){var s,c=e.nodes().filter((function(t){return!a.isSubgraph(e,t)})),u=t.selectAll("g.node").data(c,(function(t){return t})).classed("update",!0);u.exit().remove(),u.enter().append("g").attr("class","node").style("opacity",0),(u=t.selectAll("g.node")).each((function(t){var s=e.node(t),c=o.select(this);a.applyClass(c,s.class,(c.classed("update")?"update ":"")+"node"),c.select("g.label").remove();var u=c.append("g").attr("class","label"),l=i(u,s),h=n[s.shape],f=r.pick(l.node().getBBox(),"width","height");s.elem=this,s.id&&c.attr("id",s.id),s.labelId&&u.attr("id",s.labelId),r.has(s,"width")&&(f.width=s.width),r.has(s,"height")&&(f.height=s.height),f.width+=s.paddingLeft+s.paddingRight,f.height+=s.paddingTop+s.paddingBottom,u.attr("transform","translate("+(s.paddingLeft-s.paddingRight)/2+","+(s.paddingTop-s.paddingBottom)/2+")");var d=o.select(this);d.select(".label-container").remove();var p=h(d,f,s).classed("label-container",!0);a.applyStyle(p,s.style);var g=p.node().getBBox();s.width=g.width,s.height=g.height})),s=u.exit?u.exit():u.selectAll(null);return a.applyTransition(s,e).style("opacity",0).remove(),u}},function(t,e,n){var 
r=n(12);t.exports=function(t,e){for(var n=t.append("text"),i=function(t){for(var e,n="",r=!1,i=0;i0&&void 0!==arguments[0]?arguments[0]:"fatal";isNaN(t)&&(t=t.toLowerCase(),void 0!==h[t]&&(t=h[t])),f.trace=function(){},f.debug=function(){},f.info=function(){},f.warn=function(){},f.error=function(){},f.fatal=function(){},t<=h.fatal&&(f.fatal=console.error?console.error.bind(console,p("FATAL"),"color: orange"):console.log.bind(console,"",p("FATAL"))),t<=h.error&&(f.error=console.error?console.error.bind(console,p("ERROR"),"color: orange"):console.log.bind(console,"",p("ERROR"))),t<=h.warn&&(f.warn=console.warn?console.warn.bind(console,p("WARN"),"color: orange"):console.log.bind(console,"",p("WARN"))),t<=h.info&&(f.info=console.info?console.info.bind(console,p("INFO"),"color: lightblue"):console.log.bind(console,"",p("INFO"))),t<=h.debug&&(f.debug=console.debug?console.debug.bind(console,p("DEBUG"),"color: lightgreen"):console.log.bind(console,"",p("DEBUG")))},p=function(t){var e=l()().format("ss.SSS");return"%c".concat(e," : ").concat(t," : ")},g=n(70),y=function(t){for(var e="",n=0;n>=0;){if(!((n=t.indexOf("=0)){e+=t,n=-1;break}e+=t.substr(0,n),(n=(t=t.substr(n+1)).indexOf("<\/script>"))>=0&&(n+=9,t=t.substr(n))}return e},v=//gi,m=function(t){return t.replace(v,"#br#")},b=function(t){return t.replace(/#br#/g,"
")},x={getRows:function(t){if(!t)return 1;var e=m(t);return(e=e.replace(/\\n/g,"#br#")).split("#br#")},sanitizeText:function(t,e){var n=t,r=!0;if(!e.flowchart||!1!==e.flowchart.htmlLabels&&"false"!==e.flowchart.htmlLabels||(r=!1),r){var i=e.securityLevel;"antiscript"===i?n=y(n):"loose"!==i&&(n=(n=(n=m(n)).replace(//g,">")).replace(/=/g,"="),n=b(n))}return n},hasBreaks:function(t){return//gi.test(t)},splitBreaks:function(t){return t.split(//gi)},lineBreakRegex:v,removeScript:y};function _(t){return(_="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}function k(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e1&&void 0!==arguments[1]?arguments[1]:null;try{var n=new RegExp("[%]{2}(?![{]".concat(T.source,")(?=[}][%]{2}).*\n"),"ig");t=t.trim().replace(n,"").replace(/'/gm,'"'),f.debug("Detecting diagram directive".concat(null!==e?" type:"+e:""," based on the text:").concat(t));for(var r,i=[];null!==(r=E.exec(t));)if(r.index===E.lastIndex&&E.lastIndex++,r&&!e||e&&r[1]&&r[1].match(e)||e&&r[2]&&r[2].match(e)){var a=r[1]?r[1]:r[2],o=r[3]?r[3].trim():r[4]?JSON.parse(r[4].trim()):null;i.push({type:a,args:o})}return 0===i.length&&i.push({type:t,args:null}),1===i.length?i[0]:i}catch(n){return f.error("ERROR: ".concat(n.message," - Unable to parse directive").concat(null!==e?" 
type:"+e:""," based on the text:").concat(t)),{type:null,args:null}}},A=function(t){return t=t.replace(E,"").replace(C,"\n"),f.debug("Detecting diagram type based on the text "+t),t.match(/^\s*sequenceDiagram/)?"sequence":t.match(/^\s*gantt/)?"gantt":t.match(/^\s*classDiagram-v2/)?"classDiagram":t.match(/^\s*classDiagram/)?"class":t.match(/^\s*stateDiagram-v2/)?"stateDiagram":t.match(/^\s*stateDiagram/)?"state":t.match(/^\s*gitGraph/)?"git":t.match(/^\s*flowchart/)?"flowchart-v2":t.match(/^\s*info/)?"info":t.match(/^\s*pie/)?"pie":t.match(/^\s*erDiagram/)?"er":t.match(/^\s*journey/)?"journey":"flowchart"},M=function(t,e){var n={};return function(){for(var r=arguments.length,i=new Array(r),a=0;a"},n),x.lineBreakRegex.test(t))return t;var r=t.split(" "),i=[],a="";return r.forEach((function(t,o){var s=Y("".concat(t," "),n),c=Y(a,n);if(s>e){var u=R(t,e,"-",n),l=u.hyphenatedStrings,h=u.remainingWord;i.push.apply(i,[a].concat(k(l))),a=h}else c+s>=e?(i.push(a),a=t):a=[a,t].filter(Boolean).join(" ");o+1===r.length&&i.push(a)})),i.filter((function(t){return""!==t})).join(n.joinWith)}),(function(t,e,n){return"".concat(t,"-").concat(e,"-").concat(n.fontSize,"-").concat(n.fontWeight,"-").concat(n.fontFamily,"-").concat(n.joinWith)})),R=M((function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;r=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:0},r);var i=t.split(""),a=[],o="";return i.forEach((function(t,s){var c="".concat(o).concat(t);if(Y(c,r)>=e){var u=s+1,l=i.length===u,h="".concat(c).concat(n);a.push(l?c:h),o=""}else o=c})),{hyphenatedStrings:a,remainingWord:o}}),(function(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:"-",r=arguments.length>3?arguments[3]:void 0;return"".concat(t,"-").concat(e,"-").concat(n,"-").concat(r.fontSize,"-").concat(r.fontWeight,"-").concat(r.fontFamily)})),Y=function(t,e){return 
e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),z(t,e).width},z=M((function(t,e){var n=e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial"},e),r=n.fontSize,i=n.fontFamily,a=n.fontWeight;if(!t)return{width:0,height:0};var o=["sans-serif",i],c=t.split(x.lineBreakRegex),u=[],l=Object(s.select)("body");if(!l.remove)return{width:0,height:0,lineHeight:0};for(var h=l.append("svg"),f=0,d=o;fu[1].height&&u[0].width>u[1].width&&u[0].lineHeight>u[1].lineHeight?0:1]}),(function(t,e){return"".concat(t,"-").concat(e.fontSize,"-").concat(e.fontWeight,"-").concat(e.fontFamily)})),U=function(t,e,n){var r=new Map;return r.set("height",t),n?(r.set("width","100%"),r.set("style","max-width: ".concat(e,"px;"))):r.set("width",e),r},$=function(t,e,n,r){!function(t,e){var n=!0,r=!1,i=void 0;try{for(var a,o=e[Symbol.iterator]();!(n=(a=o.next()).done);n=!0){var s=a.value;t.attr(s[0],s[1])}}catch(t){r=!0,i=t}finally{try{n||null==o.return||o.return()}finally{if(r)throw i}}}(t,U(e,n,r))},W={assignWithDepth:P,wrapLabel:j,calculateTextHeight:function(t,e){return e=Object.assign({fontSize:12,fontWeight:400,fontFamily:"Arial",margin:15},e),z(t,e).height},calculateTextWidth:Y,calculateTextDimensions:z,calculateSvgSizeAttrs:U,configureSvgSize:$,detectInit:function(t){var e=S(t,/(?:init\b)|(?:initialize\b)/),n={};if(Array.isArray(e)){var r=e.map((function(t){return t.args}));n=P(n,k(r))}else n=e.args;if(n){var i=A(t);["config"].forEach((function(t){void 0!==n[t]&&("flowchart-v2"===i&&(i="flowchart"),n[i]=n[t],delete n[t])}))}return n},detectDirective:S,detectType:A,isSubstringInArray:function(t,e){for(var n=0;n=1&&(i={x:t.x,y:t.y}),a>0&&a<1&&(i={x:(1-a)*e.x+a*t.x,y:(1-a)*e.y+a*t.y})}}e=t})),i}(t)},calcCardinalityPosition:function(t,e,n){var r;f.info("our points",e),e[0]!==n&&(e=e.reverse()),e.forEach((function(t){D(t,r),r=t}));var i,a=25;r=void 0,e.forEach((function(t){if(r&&!i){var 
e=D(t,r);if(e=1&&(i={x:t.x,y:t.y}),n>0&&n<1&&(i={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var o=t?10:5,s=Math.atan2(e[0].y-i.y,e[0].x-i.x),c={x:0,y:0};return c.x=Math.sin(s)*o+(e[0].x+i.x)/2,c.y=-Math.cos(s)*o+(e[0].y+i.y)/2,c},calcTerminalLabelPosition:function(t,e,n){var r,i=JSON.parse(JSON.stringify(n));f.info("our points",i),"start_left"!==e&&"start_right"!==e&&(i=i.reverse()),i.forEach((function(t){D(t,r),r=t}));var a,o=25;r=void 0,i.forEach((function(t){if(r&&!a){var e=D(t,r);if(e=1&&(a={x:t.x,y:t.y}),n>0&&n<1&&(a={x:(1-n)*r.x+n*t.x,y:(1-n)*r.y+n*t.y})}}r=t}));var s=10,c=Math.atan2(i[0].y-a.y,i[0].x-a.x),u={x:0,y:0};return u.x=Math.sin(c)*s+(i[0].x+a.x)/2,u.y=-Math.cos(c)*s+(i[0].y+a.y)/2,"start_left"===e&&(u.x=Math.sin(c+Math.PI)*s+(i[0].x+a.x)/2,u.y=-Math.cos(c+Math.PI)*s+(i[0].y+a.y)/2),"end_right"===e&&(u.x=Math.sin(c-Math.PI)*s+(i[0].x+a.x)/2-5,u.y=-Math.cos(c-Math.PI)*s+(i[0].y+a.y)/2-5),"end_left"===e&&(u.x=Math.sin(c)*s+(i[0].x+a.x)/2-5,u.y=-Math.cos(c)*s+(i[0].y+a.y)/2-5),u},formatUrl:function(t,e){var n=t.trim();if(n)return"loose"!==e.securityLevel?Object(g.sanitizeUrl)(n):n},getStylesFromArray:N,generateId:L,random:F,memoize:M,runFunc:function(t){for(var e,n=t.split("."),r=n.length-1,i=n[r],a=window,o=0;o1?s-1:0),u=1;u=0&&(n=!0)})),n},Gt=function(t,e){var n=[];return t.nodes.forEach((function(r,i){Ht(e,r)||n.push(t.nodes[i])})),{nodes:n}},qt={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},defaultConfig:function(){return pt.flowchart},addVertex:function(t,e,n,r,i){var a,o=t;void 0!==o&&0!==o.trim().length&&(void 0===Mt[o]&&(Mt[o]={id:o,domId:"flowchart-"+o+"-"+St,styles:[],classes:[]}),St++,void 0!==e?(At=xt(),'"'===(a=x.sanitizeText(e.trim(),At))[0]&&'"'===a[a.length-1]&&(a=a.substring(1,a.length-1)),Mt[o].text=a):void 0===Mt[o].text&&(Mt[o].text=t),void 
0!==n&&(Mt[o].type=n),null!=r&&r.forEach((function(t){Mt[o].styles.push(t)})),null!=i&&i.forEach((function(t){Mt[o].classes.push(t)})))},lookUpDomId:jt,addLink:function(t,e,n,r){var i,a;for(i=0;i/)&&(Tt="LR"),Tt.match(/.*v/)&&(Tt="TB")},setClass:Yt,getTooltip:function(t){return Lt[t]},setClickEvent:function(t,e,n){t.split(",").forEach((function(t){!function(t,e){var n=jt(t);"loose"===xt().securityLevel&&void 0!==e&&void 0!==Mt[t]&&(Mt[t].haveCallback=!0,It.push((function(){var r=document.querySelector('[id="'.concat(n,'"]'));null!==r&&r.addEventListener("click",(function(){W.runFunc(e,t)}),!1)})))}(t,e)})),zt(t,n),Yt(t,"clickable")},setLink:function(t,e,n,r){t.split(",").forEach((function(t){void 0!==Mt[t]&&(Mt[t].link=W.formatUrl(e,At),Mt[t].linkTarget=r)})),zt(t,n),Yt(t,"clickable")},bindFunctions:function(t){It.forEach((function(e){e(t)}))},getDirection:function(){return Tt.trim()},getVertices:function(){return Mt},getEdges:function(){return Ot},getClasses:function(){return Dt},clear:function(t){Mt={},Dt={},Ot=[],(It=[]).push(Ut),Nt=[],Bt={},Ft=0,Lt=[],Pt=!0,Ct=t||"gen-1"},setGen:function(t){Ct=t||"gen-1"},defaultStyle:function(){return"fill:#ffa;stroke: #f66; stroke-width: 3px; stroke-dasharray: 5, 5;fill:#ffa;stroke: #666;"},addSubGraph:function(t,e,n){var r=t.trim(),i=n;t===n&&n.match(/\s/)&&(r=void 0);var a,o,s,c=[];if(a=c.concat.apply(c,e),o={boolean:{},number:{},string:{}},s=[],c=a.filter((function(t){var e=Et(t);return""!==t.trim()&&(e in o?!o[e].hasOwnProperty(t)&&(o[e][t]=!0):!(s.indexOf(t)>=0)&&s.push(t))})),"gen-1"===Ct){f.warn("LOOKING UP");for(var u=0;u0&&function t(e,n){var r=Nt[n].nodes;if(!((Wt+=1)>2e3)){if(Vt[Wt]=n,Nt[n].id===e)return{result:!0,count:0};for(var i=0,a=1;i=0){var s=t(e,o);if(s.result)return{result:!0,count:a+s.count};a+=s.count}i+=1}return{result:!1,count:a}}}("none",Nt.length-1)},getSubGraphs:function(){return Nt},destructLink:function(t,e){var n,r=function(t){var 
e=t.trim(),n=e.slice(0,-1),r="arrow_open";switch(e.slice(-1)){case"x":r="arrow_cross","x"===e[0]&&(r="double_"+r,n=n.slice(1));break;case">":r="arrow_point","<"===e[0]&&(r="double_"+r,n=n.slice(1));break;case"o":r="arrow_circle","o"===e[0]&&(r="double_"+r,n=n.slice(1))}var i="normal",a=n.length-1;"="===n[0]&&(i="thick");var o=function(t,e){for(var n=e.length,r=0,i=0;in.height/2-a)){var o=a*a*(1-r*r/(i*i));0!=o&&(o=Math.sqrt(o)),o=a-o,t.y-n.y>0&&(o=-o),e.y+=o}return e},c}function fe(t,e,n,r){return t.insert("polygon",":first-child").attr("points",r.map((function(t){return t.x+","+t.y})).join(" ")).attr("transform","translate("+-e/2+","+n/2+")")}var de={addToRender:function(t){t.shapes().question=ee,t.shapes().hexagon=ne,t.shapes().stadium=ue,t.shapes().subroutine=le,t.shapes().cylinder=he,t.shapes().rect_left_inv_arrow=re,t.shapes().lean_right=ie,t.shapes().lean_left=ae,t.shapes().trapezoid=oe,t.shapes().inv_trapezoid=se,t.shapes().rect_right_inv_arrow=ce},addToRenderV2:function(t){t({question:ee}),t({hexagon:ne}),t({stadium:ue}),t({subroutine:le}),t({cylinder:he}),t({rect_left_inv_arrow:re}),t({lean_right:ie}),t({lean_left:ae}),t({trapezoid:oe}),t({inv_trapezoid:se}),t({rect_right_inv_arrow:ce})}},pe={},ge=function(t,e,n){var r=Object(s.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=N(i.styles),c=void 0!==i.text?i.text:i.id;if(xt().flowchart.htmlLabels){var u={label:c.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=te()(r,u).node()).parentNode.removeChild(o)}else{var l=document.createElementNS("http://www.w3.org/2000/svg","text");l.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var h=c.split(x.lineBreakRegex),d=0;d').concat(a.text.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),"")):(l.labelType="text",l.label=a.text.replace(x.lineBreakRegex,"\n"),void 0===a.style&&(l.style=l.style||"stroke: #333; stroke-width: 
1.5px;fill:none"),l.labelStyle=l.labelStyle.replace("color:","fill:"))),l.id=o,l.class=c+" "+u,l.minlen=a.length||1,e.setEdge(qt.lookUpDomId(a.start),qt.lookUpDomId(a.end),l,i)}))},ve=function(t){for(var e=Object.keys(t),n=0;n=0;h--)i=l[h],qt.addVertex(i.id,i.title,"group",void 0,i.classes);var d=qt.getVertices();f.warn("Get vertices",d);var p=qt.getEdges(),g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(s.selectAll)("cluster").append("text");for(var y=0;y"),f.info("vertexText"+i),function(t){var e,n,r=Object(s.select)(document.createElementNS("http://www.w3.org/2000/svg","foreignObject")),i=r.append("xhtml:div"),a=t.label,o=t.isNode?"nodeLabel":"edgeLabel";return i.html(''+a+""),e=i,(n=t.labelStyle)&&e.attr("style",n),i.style("display","inline-block"),i.style("white-space","nowrap"),i.attr("xmlns","http://www.w3.org/1999/xhtml"),r.node()}({isNode:r,label:i.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")})),labelStyle:e.replace("fill:","color:")});var a=document.createElementNS("http://www.w3.org/2000/svg","text");a.setAttribute("style",e.replace("color:","fill:"));var o=[];o="string"==typeof i?i.split(/\\n|\n|/gi):Array.isArray(i)?i:[];for(var c=0;c0)t(a,n,r,i);else{var o=n.node(a);f.info("cp ",a," to ",i," with parent ",e),r.setNode(a,o),i!==n.parent(a)&&(f.warn("Setting parent",a,n.parent(a)),r.setParent(a,n.parent(a))),e!==i&&a!==e?(f.debug("Setting parent",a,e),r.setParent(a,e)):(f.info("In copy ",e,"root",i,"data",n.node(e),i),f.debug("Not Setting parent for node=",a,"cluster!==rootId",e!==i,"node!==clusterId",a!==e));var s=n.edges(a);f.debug("Copying Edges",s),s.forEach((function(t){f.info("Edge",t);var a=n.edge(t.v,t.w,t.name);f.info("Edge data",a,i);try{!function(t,e){return f.info("Decendants of ",e," is ",Me[e]),f.info("Edge is ",t),t.v!==e&&(t.w!==e&&(Me[e]?(f.info("Here "),Me[e].indexOf(t.v)>=0||(!!De(t.v,e)||(!!De(t.w,e)||Me[e].indexOf(t.w)>=0))):(f.debug("Tilt, ",e,",not in decendants"),!1)))}(t,i)?f.info("Skipping copy of edge 
",t.v,"--\x3e",t.w," rootId: ",i," clusterId:",e):(f.info("Copying as ",t.v,t.w,a,t.name),r.setEdge(t.v,t.w,a,t.name),f.info("newGraph edges ",r.edges(),r.edge(r.edges()[0])))}catch(t){f.error(t)}}))}f.debug("Removing node",a),n.removeNode(a)}))},Be=function t(e,n){f.trace("Searching",e);var r=n.children(e);if(f.trace("Searching children of id ",e,r),r.length<1)return f.trace("This is a valid node",e),e;for(var i=0;i ",a),a}},Le=function(t){return Ae[t]&&Ae[t].externalConnections&&Ae[t]?Ae[t].id:t},Fe=function(t,e){!t||e>10?f.debug("Opting out, no graph "):(f.debug("Opting in, graph "),t.nodes().forEach((function(e){t.children(e).length>0&&(f.warn("Cluster identified",e," Replacement id in edges: ",Be(e,t)),Me[e]=function t(e,n){for(var r=n.children(e),i=[].concat(r),a=0;a0?(f.debug("Cluster identified",e,Me),r.forEach((function(t){t.v!==e&&t.w!==e&&(De(t.v,e)^De(t.w,e)&&(f.warn("Edge: ",t," leaves cluster ",e),f.warn("Decendants of XXX ",e,": ",Me[e]),Ae[e].externalConnections=!0))}))):f.debug("Not a cluster ",e,Me)})),t.edges().forEach((function(e){var n=t.edge(e);f.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(e)),f.warn("Edge "+e.v+" -> "+e.w+": "+JSON.stringify(t.edge(e)));var r=e.v,i=e.w;f.warn("Fix XXX",Ae,"ids:",e.v,e.w,"Translateing: ",Ae[e.v]," --- ",Ae[e.w]),(Ae[e.v]||Ae[e.w])&&(f.warn("Fixing and trixing - removing XXX",e.v,e.w,e.name),r=Le(e.v),i=Le(e.w),t.removeEdge(e.v,e.w,e.name),r!==e.v&&(n.fromCluster=e.v),i!==e.w&&(n.toCluster=e.w),f.warn("Fix Replacing with XXX",r,i,e.name),t.setEdge(r,i,n,e.name))})),f.warn("Adjusted Graph",H.a.json.write(t)),Pe(t,0),f.trace(Ae))},Pe=function t(e,n){if(f.warn("extractor - ",n,H.a.json.write(e),e.children("D")),n>10)f.error("Bailing out");else{for(var r=e.nodes(),i=!1,a=0;a0}if(i){f.debug("Nodes = ",r,n);for(var c=0;c0){f.warn("Cluster without external connections, without a parent and with children",u,n);var l=e.graph(),h=new 
H.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TB"===l.rankdir?"LR":"TB",nodesep:50,ranksep:50,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}}));f.warn("Old graph before copy",H.a.json.write(e)),Ne(u,e,h,u),e.setNode(u,{clusterNode:!0,id:u,clusterData:Ae[u].clusterData,labelText:Ae[u].labelText,graph:h}),f.warn("New graph after copy node: (",u,")",H.a.json.write(h)),f.debug("Old graph after copy",H.a.json.write(e))}else f.warn("Cluster ** ",u," **not meeting the criteria !externalConnections:",!Ae[u].externalConnections," no parent: ",!e.parent(u)," children ",e.children(u)&&e.children(u).length>0,e.children("D"),n),f.debug(Ae);else f.debug("Not a cluster",u,n)}r=e.nodes(),f.warn("New list of nodes",r);for(var d=0;d0}var Ue=function(t,e,n,r){var i,a,o,s,c,u,l,h,f,d,p,g,y;if(i=e.y-t.y,o=t.x-e.x,c=e.x*t.y-t.x*e.y,f=i*n.x+o*n.y+c,d=i*r.x+o*r.y+c,!(0!==f&&0!==d&&ze(f,d)||(a=r.y-n.y,s=n.x-r.x,u=r.x*n.y-n.x*r.y,l=a*t.x+s*t.y+u,h=a*e.x+s*e.y+u,0!==l&&0!==h&&ze(l,h)||0==(p=i*s-a*o))))return g=Math.abs(p/2),{x:(y=o*u-s*c)<0?(y-g)/p:(y+g)/p,y:(y=a*c-i*u)<0?(y-g)/p:(y+g)/p}},$e=function(t,e,n){var r=t.x,i=t.y,a=[],o=Number.POSITIVE_INFINITY,s=Number.POSITIVE_INFINITY;"function"==typeof e.forEach?e.forEach((function(t){o=Math.min(o,t.x),s=Math.min(s,t.y)})):(o=Math.min(o,e.x),s=Math.min(s,e.y));for(var c=r-t.width/2-o,u=i-t.height/2-s,l=0;l1&&a.sort((function(t,e){var r=t.x-n.x,i=t.y-n.y,a=Math.sqrt(r*r+i*i),o=e.x-n.x,s=e.y-n.y,c=Math.sqrt(o*o+s*s);return aMath.abs(o)*u?(s<0&&(u=-u),n=0===s?0:u*o/s,r=u):(o<0&&(c=-c),n=c,r=0===o?0:c*s/o),{x:i+n,y:a+r}},Ve={node:n.n(je).a,circle:Ye,ellipse:Re,polygon:$e,rect:We},He=function(t,e){var n=Te(t,e,"node "+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;f.info("Classes = ",e.classes);var o=r.insert("rect",":first-child");return 
o.attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Ce(e,o),e.intersect=function(t){return Ve.rect(e,t)},r},Ge=[],qe={},Xe=0,Ze=[],Je=function(t){var e="",n=t;if(t.indexOf("~")>0){var r=t.split("~");n=r[0],e=r[1]}return{className:n,type:e}},Qe=function(t){var e=Je(t);void 0===qe[e.className]&&(qe[e.className]={id:e.className,type:e.type,cssClasses:[],methods:[],members:[],annotations:[],domId:"classid-"+e.className+"-"+Xe},Xe++)},Ke=function(t){for(var e=Object.keys(qe),n=0;n>")?r.annotations.push(i.substring(2,i.length-2)):i.indexOf(")")>0?r.methods.push(i):i&&r.members.push(i)}},en=function(t,e){t.split(",").forEach((function(t){var n=t;t[0].match(/\d/)&&(n="classid-"+n),void 0!==qe[n]&&qe[n].cssClasses.push(e)}))},nn=function(t,e,n){var r=xt(),i=t,a=Ke(i);"loose"===r.securityLevel&&void 0!==e&&void 0!==qe[i]&&(n&&(qe[i].tooltip=x.sanitizeText(n,r)),Ze.push((function(){var t=document.querySelector('[id="'.concat(a,'"]'));null!==t&&t.addEventListener("click",(function(){W.runFunc(e,a)}),!1)})))},rn={AGGREGATION:0,EXTENSION:1,COMPOSITION:2,DEPENDENCY:3},an=function(t){var e=Object(s.select)(".mermaidTooltip");null===(e._groups||e)[0][0]&&(e=Object(s.select)("body").append("div").attr("class","mermaidTooltip").style("opacity",0)),Object(s.select)(t).select("svg").selectAll("g.node").on("mouseover",(function(){var t=Object(s.select)(this);if(null!==t.attr("title")){var n=this.getBoundingClientRect();e.transition().duration(200).style("opacity",".9"),e.html(t.attr("title")).style("left",window.scrollX+n.left+(n.right-n.left)/2+"px").style("top",window.scrollY+n.top-14+document.body.scrollTop+"px"),t.classed("hover",!0)}})).on("mouseout",(function(){e.transition().duration(500).style("opacity",0),Object(s.select)(this).classed("hover",!1)}))};Ze.push(an);var on={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return 
xt().class},addClass:Qe,bindFunctions:function(t){Ze.forEach((function(e){e(t)}))},clear:function(){Ge=[],qe={},(Ze=[]).push(an)},getClass:function(t){return qe[t]},getClasses:function(){return qe},addAnnotation:function(t,e){var n=Je(t).className;qe[n].annotations.push(e)},getRelations:function(){return Ge},addRelation:function(t){f.debug("Adding relation: "+JSON.stringify(t)),Qe(t.id1),Qe(t.id2),t.id1=Je(t.id1).className,t.id2=Je(t.id2).className,Ge.push(t)},addMember:tn,addMembers:function(t,e){Array.isArray(e)&&(e.reverse(),e.forEach((function(e){return tn(t,e)})))},cleanupLabel:function(t){return":"===t.substring(0,1)?t.substr(1).trim():t.trim()},lineType:{LINE:0,DOTTED_LINE:1},relationType:rn,setClickEvent:function(t,e,n){t.split(",").forEach((function(t){nn(t,e,n),qe[t].haveCallback=!0})),en(t,"clickable")},setCssClass:en,setLink:function(t,e,n){var r=xt();t.split(",").forEach((function(t){var i=t;t[0].match(/\d/)&&(i="classid-"+i),void 0!==qe[i]&&(qe[i].link=W.formatUrl(e,r),n&&(qe[i].tooltip=x.sanitizeText(n,r)))})),en(t,"clickable")},lookUpDomId:Ke},sn=0,cn=function(t){var e=t.match(/(\+|-|~|#)?(\w+)(~\w+~|\[\])?\s+(\w+)/),n=t.match(/^([+|\-|~|#])?(\w+) *\( *(.*)\) *(\*|\$)? *(\w*[~|[\]]*\s*\w*~?)$/);return e&&!n?un(e):n?ln(n):hn(t)},un=function(t){var e="";try{e=(t[1]?t[1].trim():"")+(t[2]?t[2].trim():"")+(t[3]?dn(t[3].trim()):"")+" "+(t[4]?t[4].trim():"")}catch(n){e=t}return{displayText:e,cssStyle:""}},ln=function(t){var e="",n="";try{var r=t[1]?t[1].trim():"",i=t[2]?t[2].trim():"",a=t[3]?dn(t[3].trim()):"",o=t[4]?t[4].trim():"";n=r+i+"("+a+")"+(t[5]?" 
: "+dn(t[5]).trim():""),e=pn(o)}catch(e){n=t}return{displayText:n,cssStyle:e}},hn=function(t){var e="",n="",r="",i=t.indexOf("("),a=t.indexOf(")");if(i>1&&a>i&&a<=t.length){var o="",s="",c=t.substring(0,1);c.match(/\w/)?s=t.substring(0,i).trim():(c.match(/\+|-|~|#/)&&(o=c),s=t.substring(1,i).trim());var u=t.substring(i+1,a),l=t.substring(a+1,1);n=pn(l),e=o+s+"("+dn(u.trim())+")",a<"".length&&""!==(r=t.substring(a+2).trim())&&(r=" : "+dn(r))}else e=dn(t);return{displayText:e,cssStyle:n}},fn=function(t,e,n,r){var i=cn(e),a=t.append("tspan").attr("x",r.padding).text(i.displayText);""!==i.cssStyle&&a.attr("style",i.cssStyle),n||a.attr("dy",r.textHeight)},dn=function t(e){var n=e;return-1!=e.indexOf("~")?t(n=(n=n.replace("~","<")).replace("~",">")):n},pn=function(t){switch(t){case"*":return"font-style:italic;";case"$":return"text-decoration:underline;";default:return""}},gn=function(t,e,n){f.info("Rendering class "+e);var r,i=e.id,a={id:i,label:e.id,width:0,height:0},o=t.append("g").attr("id",Ke(i)).attr("class","classGroup");r=e.link?o.append("svg:a").attr("xlink:href",e.link).attr("target","_blank").append("text").attr("y",n.textHeight+n.padding).attr("x",0):o.append("text").attr("y",n.textHeight+n.padding).attr("x",0);var s=!0;e.annotations.forEach((function(t){var e=r.append("tspan").text("«"+t+"»");s||e.attr("dy",n.textHeight),s=!1}));var c=e.id;void 0!==e.type&&""!==e.type&&(c+="<"+e.type+">");var u=r.append("tspan").text(c).attr("class","title");s||u.attr("dy",n.textHeight);var l=r.node().getBBox().height,h=o.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin/2).attr("y2",n.padding+l+n.dividerMargin/2),d=o.append("text").attr("x",n.padding).attr("y",l+n.dividerMargin+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.members.forEach((function(t){fn(d,t,s,n),s=!1}));var 
p=d.node().getBBox(),g=o.append("line").attr("x1",0).attr("y1",n.padding+l+n.dividerMargin+p.height).attr("y2",n.padding+l+n.dividerMargin+p.height),y=o.append("text").attr("x",n.padding).attr("y",l+2*n.dividerMargin+p.height+n.textHeight).attr("fill","white").attr("class","classText");s=!0,e.methods.forEach((function(t){fn(y,t,s,n),s=!1}));var v=o.node().getBBox(),m=" ";e.cssClasses.length>0&&(m+=e.cssClasses.join(" "));var b=o.insert("rect",":first-child").attr("x",0).attr("y",0).attr("width",v.width+2*n.padding).attr("height",v.height+n.padding+.5*n.dividerMargin).attr("class",m).node().getBBox().width;return r.node().childNodes.forEach((function(t){t.setAttribute("x",(b-t.getBBox().width)/2)})),e.tooltip&&r.insert("title").text(e.tooltip),h.attr("x2",b),g.attr("x2",b),a.width=b,a.height=v.height+n.padding+.5*n.dividerMargin,a},yn=function(t,e,n,r){var i=function(t){switch(t){case rn.AGGREGATION:return"aggregation";case rn.EXTENSION:return"extension";case rn.COMPOSITION:return"composition";case rn.DEPENDENCY:return"dependency"}};e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var a,o,c=e.points,u=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis),l=t.append("path").attr("d",u(c)).attr("id","edge"+sn).attr("class","relation"),h="";r.arrowMarkerAbsolute&&(h=(h=(h=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),1==n.relation.lineType&&l.attr("class","relation dashed-line"),"none"!==n.relation.type1&&l.attr("marker-start","url("+h+"#"+i(n.relation.type1)+"Start)"),"none"!==n.relation.type2&&l.attr("marker-end","url("+h+"#"+i(n.relation.type2)+"End)");var d,p,g,y,v=e.points.length,m=W.calcLabelPosition(e.points);if(a=m.x,o=m.y,v%2!=0&&v>1){var 
b=W.calcCardinalityPosition("none"!==n.relation.type1,e.points,e.points[0]),x=W.calcCardinalityPosition("none"!==n.relation.type2,e.points,e.points[v-1]);f.debug("cardinality_1_point "+JSON.stringify(b)),f.debug("cardinality_2_point "+JSON.stringify(x)),d=b.x,p=b.y,g=x.x,y=x.y}if(void 0!==n.title){var _=t.append("g").attr("class","classLabel"),k=_.append("text").attr("class","label").attr("x",a).attr("y",o).attr("fill","red").attr("text-anchor","middle").text(n.title);window.label=k;var w=k.node().getBBox();_.insert("rect",":first-child").attr("class","box").attr("x",w.x-r.padding/2).attr("y",w.y-r.padding/2).attr("width",w.width+r.padding).attr("height",w.height+r.padding)}(f.info("Rendering relation "+JSON.stringify(n)),void 0!==n.relationTitle1&&"none"!==n.relationTitle1)&&t.append("g").attr("class","cardinality").append("text").attr("class","type1").attr("x",d).attr("y",p).attr("fill","black").attr("font-size","6").text(n.relationTitle1);void 0!==n.relationTitle2&&"none"!==n.relationTitle2&&t.append("g").attr("class","cardinality").append("text").attr("class","type2").attr("x",g).attr("y",y).attr("fill","black").attr("font-size","6").text(n.relationTitle2);sn++},vn=function(t,e,n){var r=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),i=70,a=10;"LR"===n&&(i=10,a=70);var o=r.append("rect").style("stroke","black").style("fill","black").attr("x",-1*i/2).attr("y",-1*a/2).attr("width",i).attr("height",a).attr("class","fork-join");return Ce(e,o),e.height=e.height+e.padding/2,e.width=e.width+e.padding/2,e.intersect=function(t){return Ve.rect(e,t)},r},mn={question:function(t,e){var n=Te(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding+(i.height+e.padding),o=[{x:a/2,y:0},{x:a,y:-a/2},{x:a/2,y:-a},{x:0,y:-a/2}];f.info("Question main (Circle)");var s=Se(r,a,a,o);return Ce(e,s),e.intersect=function(t){return f.warn("Intersect called"),Ve.polygon(e,o,t)},r},rect:function(t,e){var n=Te(t,e,"node 
"+e.classes,!0),r=n.shapeSvg,i=n.bbox,a=n.halfPadding;f.trace("Classes = ",e.classes);var o=r.insert("rect",":first-child");return o.attr("class","basic label-container").attr("style",e.style).attr("rx",e.rx).attr("ry",e.ry).attr("x",-i.width/2-a).attr("y",-i.height/2-a).attr("width",i.width+e.padding).attr("height",i.height+e.padding),Ce(e,o),e.intersect=function(t){return Ve.rect(e,t)},r},rectWithTitle:function(t,e){var n;n=e.classes?"node "+e.classes:"node default";var r=t.insert("g").attr("class",n).attr("id",e.domId||e.id),i=r.insert("rect",":first-child"),a=r.insert("line"),o=r.insert("g").attr("class","label"),c=e.labelText.flat();f.info("Label text",c[0]);var u,l=o.node().appendChild(Ee(c[0],e.labelStyle,!0,!0));if(xt().flowchart.htmlLabels){var h=l.children[0],d=Object(s.select)(l);u=h.getBoundingClientRect(),d.attr("width",u.width),d.attr("height",u.height)}f.info("Text 2",c);var p=c.slice(1,c.length),g=l.getBBox(),y=o.node().appendChild(Ee(p.join("
"),e.labelStyle,!0,!0));if(xt().flowchart.htmlLabels){var v=y.children[0],m=Object(s.select)(y);u=v.getBoundingClientRect(),m.attr("width",u.width),m.attr("height",u.height)}var b=e.padding/2;return Object(s.select)(y).attr("transform","translate( "+(u.width>g.width?0:(g.width-u.width)/2)+", "+(g.height+b+5)+")"),Object(s.select)(l).attr("transform","translate( "+(u.widthe.height/2-s)){var i=s*s*(1-r*r/(o*o));0!=i&&(i=Math.sqrt(i)),i=s-i,t.y-e.y>0&&(i=-i),n.y+=i}return n},r},start:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child");return r.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),Ce(e,r),e.intersect=function(t){return Ve.circle(e,7,t)},n},end:function(t,e){var n=t.insert("g").attr("class","node default").attr("id",e.domId||e.id),r=n.insert("circle",":first-child"),i=n.insert("circle",":first-child");return i.attr("class","state-start").attr("r",7).attr("width",14).attr("height",14),r.attr("class","state-end").attr("r",5).attr("width",10).attr("height",10),Ce(e,i),e.intersect=function(t){return Ve.circle(e,7,t)},n},note:He,subroutine:function(t,e){var n=Te(t,e,void 0,!0),r=n.shapeSvg,i=n.bbox,a=i.width+e.padding,o=i.height+e.padding,s=Se(r,a,o,[{x:0,y:0},{x:a,y:0},{x:a,y:-o},{x:0,y:-o},{x:0,y:0},{x:-8,y:0},{x:a+8,y:0},{x:a+8,y:-o},{x:-8,y:-o},{x:-8,y:0}]);return Ce(e,s),e.intersect=function(t){return Ve.polygon(e,t)},r},fork:vn,join:vn,class_box:function(t,e){var n,r=e.padding/2;n=e.classes?"node "+e.classes:"node default";var i=t.insert("g").attr("class",n).attr("id",e.domId||e.id),a=i.insert("rect",":first-child"),o=i.insert("line"),c=i.insert("line"),u=0,l=4,h=i.insert("g").attr("class","label"),f=0,d=e.classData.annotations&&e.classData.annotations[0],p=e.classData.annotations[0]?"«"+e.classData.annotations[0]+"»":"",g=h.node().appendChild(Ee(p,e.labelStyle,!0,!0)),y=g.getBBox();if(xt().flowchart.htmlLabels){var 
v=g.children[0],m=Object(s.select)(g);y=v.getBoundingClientRect(),m.attr("width",y.width),m.attr("height",y.height)}e.classData.annotations[0]&&(l+=y.height+4,u+=y.width);var b=e.classData.id;void 0!==e.classData.type&&""!==e.classData.type&&(b+="<"+e.classData.type+">");var x=h.node().appendChild(Ee(b,e.labelStyle,!0,!0));Object(s.select)(x).attr("class","classTitle");var _=x.getBBox();if(xt().flowchart.htmlLabels){var k=x.children[0],w=Object(s.select)(x);_=k.getBoundingClientRect(),w.attr("width",_.width),w.attr("height",_.height)}l+=_.height+4,_.width>u&&(u=_.width);var E=[];e.classData.members.forEach((function(t){var n=cn(t).displayText,r=h.node().appendChild(Ee(n,e.labelStyle,!0,!0)),i=r.getBBox();if(xt().flowchart.htmlLabels){var a=r.children[0],o=Object(s.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>u&&(u=i.width),l+=i.height+4,E.push(r)})),l+=8;var T=[];if(e.classData.methods.forEach((function(t){var n=cn(t).displayText,r=h.node().appendChild(Ee(n,e.labelStyle,!0,!0)),i=r.getBBox();if(xt().flowchart.htmlLabels){var a=r.children[0],o=Object(s.select)(r);i=a.getBoundingClientRect(),o.attr("width",i.width),o.attr("height",i.height)}i.width>u&&(u=i.width),l+=i.height+4,T.push(r)})),l+=8,d){var C=(u-y.width)/2;Object(s.select)(g).attr("transform","translate( "+(-1*u/2+C)+", "+-1*l/2+")"),f=y.height+4}var S=(u-_.width)/2;return Object(s.select)(x).attr("transform","translate( "+(-1*u/2+S)+", "+(-1*l/2+f)+")"),f+=_.height+4,o.attr("class","divider").attr("x1",-u/2-r).attr("x2",u/2+r).attr("y1",-l/2-r+8+f).attr("y2",-l/2-r+8+f),f+=8,E.forEach((function(t){Object(s.select)(t).attr("transform","translate( "+-u/2+", "+(-1*l/2+f+4)+")"),f+=_.height+4})),f+=8,c.attr("class","divider").attr("x1",-u/2-r).attr("x2",u/2+r).attr("y1",-l/2-r+8+f).attr("y2",-l/2-r+8+f),f+=8,T.forEach((function(t){Object(s.select)(t).attr("transform","translate( "+-u/2+", "+(-1*l/2+f)+")"),f+=_.height+4})),a.attr("class","outer 
title-state").attr("x",-u/2-r).attr("y",-l/2-r).attr("width",u+e.padding).attr("height",l+e.padding),Ce(e,a),e.intersect=function(t){return Ve.rect(e,t)},i}},bn={},xn=function(t){var e=bn[t.id];f.trace("Transforming node",t,"translate("+(t.x-t.width/2-5)+", "+(t.y-t.height/2-5)+")");t.clusterNode?e.attr("transform","translate("+(t.x-t.width/2-8)+", "+(t.y-t.height/2-8)+")"):e.attr("transform","translate("+t.x+", "+t.y+")")},_n={rect:function(t,e){f.trace("Creating subgraph rect for ",e.id,e);var n=t.insert("g").attr("class","cluster"+(e.class?" "+e.class:"")).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=i.node().appendChild(Ee(e.labelText,e.labelStyle,void 0,!0)),o=a.getBBox();if(xt().flowchart.htmlLabels){var c=a.children[0],u=Object(s.select)(a);o=c.getBoundingClientRect(),u.attr("width",o.width),u.attr("height",o.height)}var l=0*e.padding,h=l/2;f.trace("Data ",e,JSON.stringify(e)),r.attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-h).attr("y",e.y-e.height/2-h).attr("width",e.width+l).attr("height",e.height+l),i.attr("transform","translate("+(e.x-o.width/2)+", "+(e.y-e.height/2-e.padding/3+3)+")");var d=r.node().getBBox();return e.width=d.width,e.height=d.height,e.intersect=function(t){return We(e,t)},n},roundedWithTitle:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=n.insert("g").attr("class","cluster-label"),a=n.append("rect"),o=i.node().appendChild(Ee(e.labelText,e.labelStyle,void 0,!0)),c=o.getBBox();if(xt().flowchart.htmlLabels){var u=o.children[0],l=Object(s.select)(o);c=u.getBoundingClientRect(),l.attr("width",c.width),l.attr("height",c.height)}c=o.getBBox();var 
h=0*e.padding,f=h/2;r.attr("class","outer").attr("x",e.x-e.width/2-f).attr("y",e.y-e.height/2-f).attr("width",e.width+h).attr("height",e.height+h),a.attr("class","inner").attr("x",e.x-e.width/2-f).attr("y",e.y-e.height/2-f+c.height-1).attr("width",e.width+h).attr("height",e.height+h-c.height-3),i.attr("transform","translate("+(e.x-c.width/2)+", "+(e.y-e.height/2-e.padding/3+(xt().flowchart.htmlLabels?5:3))+")");var d=r.node().getBBox();return e.width=d.width,e.height=d.height,e.intersect=function(t){return We(e,t)},n},noteGroup:function(t,e){var n=t.insert("g").attr("class","note-cluster").attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("rx",e.rx).attr("ry",e.ry).attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2-a).attr("width",e.width+i).attr("height",e.height+i).attr("fill","none");var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return We(e,t)},n},divider:function(t,e){var n=t.insert("g").attr("class",e.classes).attr("id",e.id),r=n.insert("rect",":first-child"),i=0*e.padding,a=i/2;r.attr("class","divider").attr("x",e.x-e.width/2-a).attr("y",e.y-e.height/2).attr("width",e.width+i).attr("height",e.height+i);var o=r.node().getBBox();return e.width=o.width,e.height=o.height,e.intersect=function(t){return We(e,t)},n}},kn={},wn={},En={},Tn=function(t,e){var n=t.x,r=t.y,i=Math.abs(e.x-n),a=Math.abs(e.y-r),o=t.width/2,s=t.height/2;return i>=o||a>=s},Cn=function(t,e,n){f.warn("intersection calc o:",e," i:",n,t);var r=t.x,i=t.y,a=Math.abs(r-n.x),o=t.width/2,s=n.xMath.abs(r-e.x)*c){var y=n.y0&&f.info("Recursive edges",n.edge(n.edges()[0]));var c=o.insert("g").attr("class","clusters"),u=o.insert("g").attr("class","edgePaths"),l=o.insert("g").attr("class","edgeLabels"),h=o.insert("g").attr("class","nodes");return n.nodes().forEach((function(e){var o=n.node(e);if(void 0!==i){var s=JSON.parse(JSON.stringify(i.clusterData));f.info("Setting data for cluster XXX (",e,") 
",s,i),n.setNode(i.id,s),n.parent(e)||(f.warn("Setting parent",e,i.id),n.setParent(e,i.id,s))}if(f.info("(Insert) Node XXX"+e+": "+JSON.stringify(n.node(e))),o&&o.clusterNode){f.info("Cluster identified",e,o,n.node(e));var c=t(h,o.graph,r,n.node(e));Ce(o,c),function(t,e){bn[e.id]=t}(c,o),f.warn("Recursive render complete",c,o)}else n.children(e).length>0?(f.info("Cluster - the non recursive path XXX",e,o.id,o,n),f.info(Be(o.id,n)),Ae[o.id]={id:Be(o.id,n),node:o}):(f.info("Node - the non recursive path",e,o.id,o),function(t,e,n){var r,i;e.link?(r=t.insert("svg:a").attr("xlink:href",e.link).attr("target",e.linkTarget||"_blank"),i=mn[e.shape](r,e,n)):r=i=mn[e.shape](t,e,n),e.tooltip&&i.attr("title",e.tooltip),e.class&&i.attr("class","node default "+e.class),bn[e.id]=r,e.haveCallback&&bn[e.id].attr("class",bn[e.id].attr("class")+" clickable")}(h,n.node(e),a))})),n.edges().forEach((function(t){var e=n.edge(t.v,t.w,t.name);f.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(t)),f.info("Edge "+t.v+" -> "+t.w+": ",t," ",JSON.stringify(n.edge(t))),f.info("Fix",Ae,"ids:",t.v,t.w,"Translateing: ",Ae[t.v],Ae[t.w]),function(t,e){var n=Ee(e.label,e.labelStyle),r=t.insert("g").attr("class","edgeLabel"),i=r.insert("g").attr("class","label");i.node().appendChild(n);var a=n.getBBox();if(xt().flowchart.htmlLabels){var o=n.children[0],c=Object(s.select)(n);a=o.getBoundingClientRect(),c.attr("width",a.width),c.attr("height",a.height)}if(i.attr("transform","translate("+-a.width/2+", "+-a.height/2+")"),wn[e.id]=r,e.width=a.width,e.height=a.height,e.startLabelLeft){var u=Ee(e.startLabelLeft,e.labelStyle),l=t.insert("g").attr("class","edgeTerminals"),h=l.insert("g").attr("class","inner");h.node().appendChild(u);var f=u.getBBox();h.attr("transform","translate("+-f.width/2+", "+-f.height/2+")"),En[e.id]||(En[e.id]={}),En[e.id].startLeft=l}if(e.startLabelRight){var 
d=Ee(e.startLabelRight,e.labelStyle),p=t.insert("g").attr("class","edgeTerminals"),g=p.insert("g").attr("class","inner");p.node().appendChild(d),g.node().appendChild(d);var y=d.getBBox();g.attr("transform","translate("+-y.width/2+", "+-y.height/2+")"),En[e.id]||(En[e.id]={}),En[e.id].startRight=p}if(e.endLabelLeft){var v=Ee(e.endLabelLeft,e.labelStyle),m=t.insert("g").attr("class","edgeTerminals"),b=m.insert("g").attr("class","inner");b.node().appendChild(v);var x=v.getBBox();b.attr("transform","translate("+-x.width/2+", "+-x.height/2+")"),m.node().appendChild(v),En[e.id]||(En[e.id]={}),En[e.id].endLeft=m}if(e.endLabelRight){var _=Ee(e.endLabelRight,e.labelStyle),k=t.insert("g").attr("class","edgeTerminals"),w=k.insert("g").attr("class","inner");w.node().appendChild(_);var E=_.getBBox();w.attr("transform","translate("+-E.width/2+", "+-E.height/2+")"),k.node().appendChild(_),En[e.id]||(En[e.id]={}),En[e.id].endRight=k}}(l,e)})),n.edges().forEach((function(t){f.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(t))})),f.info("#############################################"),f.info("### Layout ###"),f.info("#############################################"),f.info(n),_e.a.layout(n),f.info("Graph after layout:",H.a.json.write(n)),Ie(n).forEach((function(t){var e=n.node(t);f.info("Position "+t+": "+JSON.stringify(n.node(t))),f.info("Position "+t+": ("+e.x,","+e.y,") width: ",e.width," height: ",e.height),e&&e.clusterNode?xn(e):n.children(t).length>0?(!function(t,e){f.trace("Inserting cluster");var n=e.shape||"rect";kn[e.id]=_n[n](t,e)}(c,e),Ae[e.id].node=e):xn(e)})),n.edges().forEach((function(t){var e=n.edge(t);f.info("Edge "+t.v+" -> "+t.w+": "+JSON.stringify(e),e);var i=function(t,e,n,r,i,a){var o=n.points,c=!1,u=a.node(e.v),l=a.node(e.w);if(l.intersect&&u.intersect&&((o=o.slice(1,n.points.length-1)).unshift(u.intersect(o[0])),f.info("Last point",o[o.length-1],l,l.intersect(o[o.length-1])),o.push(l.intersect(o[o.length-1]))),n.toCluster){var h;f.trace("edge",n),f.trace("to 
cluster",r[n.toCluster]),o=[];var d=!1;n.points.forEach((function(t){var e=r[n.toCluster].node;if(Tn(e,t)||d)d||o.push(t);else{f.trace("inside",n.toCluster,t,h);var i=Cn(e,h,t),a=!1;o.forEach((function(t){a=a||t.x===i.x&&t.y===i.y})),o.find((function(t){return t.x===i.x&&t.y===i.y}))?f.warn("no intersect",i,o):o.push(i),d=!0}h=t})),c=!0}if(n.fromCluster){f.trace("edge",n),f.warn("from cluster",r[n.fromCluster]);for(var p,g=[],y=!1,v=o.length-1;v>=0;v--){var m=o[v],b=r[n.fromCluster].node;if(Tn(b,m)||y)f.trace("Outside point",m),y||g.unshift(m);else{f.warn("inside",n.fromCluster,m,b);var x=Cn(b,p,m);g.unshift(x),y=!0}p=m}o=g,c=!0}var _,k=o.filter((function(t){return!Number.isNaN(t.y)})),w=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis);switch(n.thickness){case"normal":_="edge-thickness-normal";break;case"thick":_="edge-thickness-thick";break;default:_=""}switch(n.pattern){case"solid":_+=" edge-pattern-solid";break;case"dotted":_+=" edge-pattern-dotted";break;case"dashed":_+=" edge-pattern-dashed"}var E=t.append("path").attr("d",w(k)).attr("id",n.id).attr("class"," "+_+(n.classes?" 
"+n.classes:"")).attr("style",n.style),T="";switch(xt().state.arrowMarkerAbsolute&&(T=(T=(T=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),f.info("arrowTypeStart",n.arrowTypeStart),f.info("arrowTypeEnd",n.arrowTypeEnd),n.arrowTypeStart){case"arrow_cross":E.attr("marker-start","url("+T+"#"+i+"-crossStart)");break;case"arrow_point":E.attr("marker-start","url("+T+"#"+i+"-pointStart)");break;case"arrow_barb":E.attr("marker-start","url("+T+"#"+i+"-barbStart)");break;case"arrow_circle":E.attr("marker-start","url("+T+"#"+i+"-circleStart)");break;case"aggregation":E.attr("marker-start","url("+T+"#"+i+"-aggregationStart)");break;case"extension":E.attr("marker-start","url("+T+"#"+i+"-extensionStart)");break;case"composition":E.attr("marker-start","url("+T+"#"+i+"-compositionStart)");break;case"dependency":E.attr("marker-start","url("+T+"#"+i+"-dependencyStart)")}switch(n.arrowTypeEnd){case"arrow_cross":E.attr("marker-end","url("+T+"#"+i+"-crossEnd)");break;case"arrow_point":E.attr("marker-end","url("+T+"#"+i+"-pointEnd)");break;case"arrow_barb":E.attr("marker-end","url("+T+"#"+i+"-barbEnd)");break;case"arrow_circle":E.attr("marker-end","url("+T+"#"+i+"-circleEnd)");break;case"aggregation":E.attr("marker-end","url("+T+"#"+i+"-aggregationEnd)");break;case"extension":E.attr("marker-end","url("+T+"#"+i+"-extensionEnd)");break;case"composition":E.attr("marker-end","url("+T+"#"+i+"-compositionEnd)");break;case"dependency":E.attr("marker-end","url("+T+"#"+i+"-dependencyEnd)")}var C={};return c&&(C.updatedPath=o),C.originalPath=n.points,C}(u,t,e,Ae,r,n);!function(t,e){f.info("Moving label",t.id,t.label,wn[t.id]);var n=e.updatedPath?e.updatedPath:e.originalPath;if(t.label){var r=wn[t.id],i=t.x,a=t.y;if(n){var o=W.calcLabelPosition(n);f.info("Moving label from (",i,",",a,") to (",o.x,",",o.y,")")}r.attr("transform","translate("+i+", "+a+")")}if(t.startLabelLeft){var 
s=En[t.id].startLeft,c=t.x,u=t.y;if(n){var l=W.calcTerminalLabelPosition(0,"start_left",n);c=l.x,u=l.y}s.attr("transform","translate("+c+", "+u+")")}if(t.startLabelRight){var h=En[t.id].startRight,d=t.x,p=t.y;if(n){var g=W.calcTerminalLabelPosition(0,"start_right",n);d=g.x,p=g.y}h.attr("transform","translate("+d+", "+p+")")}if(t.endLabelLeft){var y=En[t.id].endLeft,v=t.x,m=t.y;if(n){var b=W.calcTerminalLabelPosition(0,"end_left",n);v=b.x,m=b.y}y.attr("transform","translate("+v+", "+m+")")}if(t.endLabelRight){var x=En[t.id].endRight,_=t.x,k=t.y;if(n){var w=W.calcTerminalLabelPosition(0,"end_right",n);_=w.x,k=w.y}x.attr("transform","translate("+_+", "+k+")")}}(e,i)})),o},An=function(t,e,n,r,i){we(t,n,r,i),bn={},wn={},En={},kn={},Me={},Oe={},Ae={},f.warn("Graph at first:",H.a.json.write(e)),Fe(e),f.warn("Graph after:",H.a.json.write(e)),Sn(t,e,r)},Mn={},On=function(t,e,n){var r=Object(s.select)('[id="'.concat(n,'"]'));Object.keys(t).forEach((function(n){var i=t[n],a="default";i.classes.length>0&&(a=i.classes.join(" "));var o,s=N(i.styles),c=void 0!==i.text?i.text:i.id;if(xt().flowchart.htmlLabels){var u={label:c.replace(/fa[lrsb]?:fa-[\w-]+/g,(function(t){return"")}))};(o=te()(r,u).node()).parentNode.removeChild(o)}else{var l=document.createElementNS("http://www.w3.org/2000/svg","text");l.setAttribute("style",s.labelStyle.replace("color:","fill:"));for(var h=c.split(x.lineBreakRegex),d=0;d=0;h--)i=l[h],f.info("Subgraph - ",i),qt.addVertex(i.id,i.title,"group",void 0,i.classes);var d=qt.getVertices(),p=qt.getEdges();f.info(p);var g=0;for(g=l.length-1;g>=0;g--){i=l[g],Object(s.selectAll)("cluster").append("text");for(var y=0;y0)switch(e.valign){case"top":case"start":s=function(){return Math.round(e.y+e.textMargin)};break;case"middle":case"center":s=function(){return Math.round(e.y+(n+r+e.textMargin)/2)};break;case"bottom":case"end":s=function(){return Math.round(e.y+(n+r+2*e.textMargin)-e.textMargin)}}if(void 0!==e.anchor&&void 0!==e.textMargin&&void 
0!==e.width)switch(e.anchor){case"left":case"start":e.x=Math.round(e.x+e.textMargin),e.anchor="start",e.dominantBaseline="text-after-edge",e.alignmentBaseline="middle";break;case"middle":case"center":e.x=Math.round(e.x+e.width/2),e.anchor="middle",e.dominantBaseline="middle",e.alignmentBaseline="middle";break;case"right":case"end":e.x=Math.round(e.x+e.width-e.textMargin),e.anchor="end",e.dominantBaseline="text-before-edge",e.alignmentBaseline="middle"}for(var c=0;c0&&(r+=(l._groups||l)[0][0].getBBox().height,n=r),a.push(l)}return a},Pn=function(t,e){var n,r,i,a,o,s=t.append("polygon");return s.attr("points",(n=e.x,r=e.y,i=e.width,a=e.height,n+","+r+" "+(n+i)+","+r+" "+(n+i)+","+(r+a-(o=7))+" "+(n+i-1.2*o)+","+(r+a)+" "+n+","+(r+a))),s.attr("class","labelBox"),e.y=e.y+e.height/2,Fn(t,e),s},In=-1,jn=function(){return{x:0,y:0,fill:void 0,anchor:void 0,style:"#666",width:void 0,height:void 0,textMargin:0,rx:0,ry:0,tspan:!0,valign:void 0}},Rn=function(){return{x:0,y:0,fill:"#EDF2AE",stroke:"#666",width:100,anchor:"start",height:100,rx:0,ry:0}},Yn=function(){function t(t,e,n,i,a,o,s){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c){for(var u=c.actorFontSize,l=c.actorFontFamily,h=c.actorFontWeight,f=t.split(x.lineBreakRegex),d=0;d2&&void 0!==arguments[2]?arguments[2]:{text:void 0,wrap:void 0},r=arguments.length>3?arguments[3]:void 0;if(r===nr.ACTIVE_END){var i=Kn(t.actor);if(i<1){var a=new Error("Trying to inactivate an inactive participant ("+t.actor+")");throw a.hash={text:"->>-",token:"->>-",line:"1",loc:{first_line:1,last_line:1,first_column:1,last_column:1},expected:["'ACTIVE_PARTICIPANT'"]},a}}return Hn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,type:r}),!0},er=function(){return 
Jn},nr={SOLID:0,DOTTED:1,NOTE:2,SOLID_CROSS:3,DOTTED_CROSS:4,SOLID_OPEN:5,DOTTED_OPEN:6,LOOP_START:10,LOOP_END:11,ALT_START:12,ALT_ELSE:13,ALT_END:14,OPT_START:15,OPT_END:16,ACTIVE_START:17,ACTIVE_END:18,PAR_START:19,PAR_AND:20,PAR_END:21,RECT_START:22,RECT_END:23},rr=function(t,e,n){var r={actor:t,placement:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap},i=[].concat(t,t);Gn.push(r),Hn.push({from:i[0],to:i[1],message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,type:nr.NOTE,placement:e})},ir=function(t){qn=t.text,Xn=void 0===t.wrap&&er()||!!t.wrap},ar={addActor:Qn,addMessage:function(t,e,n,r){Hn.push({from:t,to:e,message:n.text,wrap:void 0===n.wrap&&er()||!!n.wrap,answer:r})},addSignal:tr,autoWrap:er,setWrap:function(t){Jn=t},enableSequenceNumbers:function(){Zn=!0},showSequenceNumbers:function(){return Zn},getMessages:function(){return Hn},getActors:function(){return Vn},getActor:function(t){return Vn[t]},getActorKeys:function(){return Object.keys(Vn)},getTitle:function(){return qn},parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().sequence},getTitleWrapped:function(){return Xn},clear:function(){Vn={},Hn=[]},parseMessage:function(t){var e=t.trim(),n={text:e.replace(/^[:]?(?:no)?wrap:/,"").trim(),wrap:null===e.match(/^[:]?(?:no)?wrap:/)?x.hasBreaks(e)||void 0:null!==e.match(/^[:]?wrap:/)||null===e.match(/^[:]?nowrap:/)&&void 0};return f.debug("parseMessage:",n),n},LINETYPE:nr,ARROWTYPE:{FILLED:0,OPEN:1},PLACEMENT:{LEFTOF:0,RIGHTOF:1,OVER:2},addNote:rr,setTitle:ir,apply:function t(e){if(e instanceof Array)e.forEach((function(e){t(e)}));else switch(e.type){case"addActor":Qn(e.actor,e.actor,e.description);break;case"activeStart":case"activeEnd":tr(e.actor,void 0,void 0,e.signalType);break;case"addNote":rr(e.actor,e.placement,e.text);break;case"addMessage":tr(e.from,e.to,e.msg,e.signalType);break;case"loopStart":tr(void 0,void 0,e.loopText,e.signalType);break;case"loopEnd":tr(void 0,void 0,void 
0,e.signalType);break;case"rectStart":tr(void 0,void 0,e.color,e.signalType);break;case"rectEnd":tr(void 0,void 0,void 0,e.signalType);break;case"optStart":tr(void 0,void 0,e.optText,e.signalType);break;case"optEnd":tr(void 0,void 0,void 0,e.signalType);break;case"altStart":case"else":tr(void 0,void 0,e.altText,e.signalType);break;case"altEnd":tr(void 0,void 0,void 0,e.signalType);break;case"setTitle":ir(e.text);break;case"parStart":case"and":tr(void 0,void 0,e.parText,e.signalType);break;case"parEnd":tr(void 0,void 0,void 0,e.signalType)}}};Un.parser.yy=ar;var or={},sr={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],activations:[],models:{getHeight:function(){return Math.max.apply(null,0===this.actors.length?[0]:this.actors.map((function(t){return t.height||0})))+(0===this.loops.length?0:this.loops.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.messages.length?0:this.messages.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))+(0===this.notes.length?0:this.notes.map((function(t){return t.height||0})).reduce((function(t,e){return t+e})))},clear:function(){this.actors=[],this.loops=[],this.messages=[],this.notes=[]},addActor:function(t){this.actors.push(t)},addLoop:function(t){this.loops.push(t)},addMessage:function(t){this.messages.push(t)},addNote:function(t){this.notes.push(t)},lastActor:function(){return this.actors[this.actors.length-1]},lastLoop:function(){return this.loops[this.loops.length-1]},lastMessage:function(){return this.messages[this.messages.length-1]},lastNote:function(){return this.notes[this.notes.length-1]},actors:[],loops:[],messages:[],notes:[]},init:function(){this.sequenceItems=[],this.activations=[],this.models.clear(),this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0,fr(Un.parser.yy.getConfig())},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var 
i=this,a=0;function o(o){return function(s){a++;var c=i.sequenceItems.length-a+1;i.updateVal(s,"starty",e-c*or.boxMargin,Math.min),i.updateVal(s,"stopy",r+c*or.boxMargin,Math.max),i.updateVal(sr.data,"startx",t-c*or.boxMargin,Math.min),i.updateVal(sr.data,"stopx",n+c*or.boxMargin,Math.max),"activation"!==o&&(i.updateVal(s,"startx",t-c*or.boxMargin,Math.min),i.updateVal(s,"stopx",n+c*or.boxMargin,Math.max),i.updateVal(sr.data,"starty",e-c*or.boxMargin,Math.min),i.updateVal(sr.data,"stopy",r+c*or.boxMargin,Math.max))}}this.sequenceItems.forEach(o()),this.activations.forEach(o("activation"))},insert:function(t,e,n,r){var i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(sr.data,"startx",i,Math.min),this.updateVal(sr.data,"starty",o,Math.min),this.updateVal(sr.data,"stopx",a,Math.max),this.updateVal(sr.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},newActivation:function(t,e,n){var r=n[t.from.actor],i=dr(t.from.actor).length||0,a=r.x+r.width/2+(i-1)*or.activationWidth/2;this.activations.push({startx:a,starty:this.verticalPos+2,stopx:a+or.activationWidth,stopy:void 0,actor:t.from.actor,anchored:zn.anchorElement(e)})},endActivation:function(t){var e=this.activations.map((function(t){return t.actor})).lastIndexOf(t.from.actor);return this.activations.splice(e,1)[0]},createLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;return{startx:void 0,starty:this.verticalPos,stopx:void 0,stopy:void 0,title:t.message,wrap:t.wrap,width:t.width,height:0,fill:e}},newLoop:function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{message:void 0,wrap:!1,width:void 0},e=arguments.length>1?arguments[1]:void 0;this.sequenceItems.push(this.createLoop(t,e))},endLoop:function(){return this.sequenceItems.pop()},addSectionToLoop:function(t){var 
e=this.sequenceItems.pop();e.sections=e.sections||[],e.sectionTitles=e.sectionTitles||[],e.sections.push({y:sr.getVerticalPos(),height:0}),e.sectionTitles.push(t),this.sequenceItems.push(e)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return{bounds:this.data,models:this.models}}},cr=function(t){return{fontFamily:t.messageFontFamily,fontSize:t.messageFontSize,fontWeight:t.messageFontWeight}},ur=function(t){return{fontFamily:t.noteFontFamily,fontSize:t.noteFontSize,fontWeight:t.noteFontWeight}},lr=function(t){return{fontFamily:t.actorFontFamily,fontSize:t.actorFontSize,fontWeight:t.actorFontWeight}},hr=function(t,e,n,r){for(var i=0,a=0,o=0;o0&&o.forEach((function(r){if(n=r,i.startx===i.stopx){var a=e[t.from],o=e[t.to];n.from=Math.min(a.x-i.width/2,a.x-a.width/2,n.from),n.to=Math.max(o.x+i.width/2,o.x+a.width/2,n.to),n.width=Math.max(n.width,Math.abs(n.to-n.from))-or.labelBoxWidth}else n.from=Math.min(i.startx,n.from),n.to=Math.max(i.stopx,n.to),n.width=Math.max(n.width,i.width)-or.labelBoxWidth})))})),sr.activations=[],f.debug("Loop type widths:",a),a},br={bounds:sr,drawActors:hr,setConf:fr,draw:function(t,e){or=xt().sequence,Un.parser.yy.clear(),Un.parser.yy.setWrap(or.wrap),Un.parser.parse(t+"\n"),sr.init(),f.debug("C:".concat(JSON.stringify(or,null,2)));var n=Object(s.select)('[id="'.concat(e,'"]')),r=Un.parser.yy.getActors(),i=Un.parser.yy.getActorKeys(),a=Un.parser.yy.getMessages(),o=Un.parser.yy.getTitle(),c=yr(r,a);or.height=vr(r,c),hr(n,r,i,0);var u=mr(a,r,c);zn.insertArrowHead(n),zn.insertArrowCrossHead(n),zn.insertSequenceNumber(n);var l=1;a.forEach((function(t){var e,i,a;switch(t.type){case Un.parser.yy.LINETYPE.NOTE:i=t.noteModel,function(t,e){sr.bumpVerticalPos(or.boxMargin),e.height=or.boxMargin,e.starty=sr.getVerticalPos();var n=zn.getNoteRect();n.x=e.startx,n.y=e.starty,n.width=e.width||or.width,n.class="note";var 
r=t.append("g"),i=zn.drawRect(r,n),a=zn.getTextObj();a.x=e.startx,a.y=e.starty,a.width=n.width,a.dy="1em",a.text=e.message,a.class="noteText",a.fontFamily=or.noteFontFamily,a.fontSize=or.noteFontSize,a.fontWeight=or.noteFontWeight,a.anchor=or.noteAlign,a.textMargin=or.noteMargin,a.valign=or.noteAlign,a.wrap=!0;var o=Fn(r,a),s=Math.round(o.map((function(t){return(t._groups||t)[0][0].getBBox().height})).reduce((function(t,e){return t+e})));i.attr("height",s+2*or.noteMargin),e.height+=s+2*or.noteMargin,sr.bumpVerticalPos(s+2*or.noteMargin),e.stopy=e.starty+s+2*or.noteMargin,e.stopx=e.startx+n.width,sr.insert(e.startx,e.starty,e.stopx,e.stopy),sr.models.addNote(e)}(n,i);break;case Un.parser.yy.LINETYPE.ACTIVE_START:sr.newActivation(t,n,r);break;case Un.parser.yy.LINETYPE.ACTIVE_END:!function(t,e){var r=sr.endActivation(t);r.starty+18>e&&(r.starty=e-6,e+=12),zn.drawActivation(n,r,e,or,dr(t.from.actor).length),sr.insert(r.startx,e-10,r.stopx,e)}(t,sr.getVerticalPos());break;case Un.parser.yy.LINETYPE.LOOP_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.LOOP_END:e=sr.endLoop(),zn.drawLoop(n,e,"loop",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.RECT_START:gr(u,t,or.boxMargin,or.boxMargin,(function(t){return sr.newLoop(void 0,t.message)}));break;case Un.parser.yy.LINETYPE.RECT_END:e=sr.endLoop(),zn.drawBackgroundRect(n,e),sr.models.addLoop(e),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos());break;case Un.parser.yy.LINETYPE.OPT_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.OPT_END:e=sr.endLoop(),zn.drawLoop(n,e,"opt",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.ALT_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case 
Un.parser.yy.LINETYPE.ALT_ELSE:gr(u,t,or.boxMargin+or.boxTextMargin,or.boxMargin,(function(t){return sr.addSectionToLoop(t)}));break;case Un.parser.yy.LINETYPE.ALT_END:e=sr.endLoop(),zn.drawLoop(n,e,"alt",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;case Un.parser.yy.LINETYPE.PAR_START:gr(u,t,or.boxMargin,or.boxMargin+or.boxTextMargin,(function(t){return sr.newLoop(t)}));break;case Un.parser.yy.LINETYPE.PAR_AND:gr(u,t,or.boxMargin+or.boxTextMargin,or.boxMargin,(function(t){return sr.addSectionToLoop(t)}));break;case Un.parser.yy.LINETYPE.PAR_END:e=sr.endLoop(),zn.drawLoop(n,e,"par",or),sr.bumpVerticalPos(e.stopy-sr.getVerticalPos()),sr.models.addLoop(e);break;default:try{(a=t.msgModel).starty=sr.getVerticalPos(),a.sequenceIndex=l,function(t,e){sr.bumpVerticalPos(10);var n=e.startx,r=e.stopx,i=e.starty,a=e.message,o=e.type,s=e.sequenceIndex,c=e.wrap,u=x.splitBreaks(a).length,l=W.calculateTextDimensions(a,cr(or)),h=l.height/u;e.height+=h,sr.bumpVerticalPos(h);var f=zn.getTextObj();f.x=n,f.y=i+10,f.width=r-n,f.class="messageText",f.dy="1em",f.text=a,f.fontFamily=or.messageFontFamily,f.fontSize=or.messageFontSize,f.fontWeight=or.messageFontWeight,f.anchor=or.messageAlign,f.valign=or.messageAlign,f.textMargin=or.wrapPadding,f.tspan=!1,f.wrap=c,Fn(t,f);var d,p,g=l.height-10,y=l.width;if(n===r){p=sr.getVerticalPos()+g,or.rightAngles?d=t.append("path").attr("d","M ".concat(n,",").concat(p," H ").concat(n+Math.max(or.width/2,y/2)," V ").concat(p+25," H ").concat(n)):(g+=or.boxMargin,p=sr.getVerticalPos()+g,d=t.append("path").attr("d","M "+n+","+p+" C "+(n+60)+","+(p-10)+" "+(n+60)+","+(p+30)+" "+n+","+(p+20))),g+=30;var v=Math.max(y/2,or.width/2);sr.insert(n-v,sr.getVerticalPos()-10+g,r+v,sr.getVerticalPos()+30+g)}else 
g+=or.boxMargin,p=sr.getVerticalPos()+g,(d=t.append("line")).attr("x1",n),d.attr("y1",p),d.attr("x2",r),d.attr("y2",p),sr.insert(n,p-10,r,p);o===Un.parser.yy.LINETYPE.DOTTED||o===Un.parser.yy.LINETYPE.DOTTED_CROSS||o===Un.parser.yy.LINETYPE.DOTTED_OPEN?(d.style("stroke-dasharray","3, 3"),d.attr("class","messageLine1")):d.attr("class","messageLine0");var m="";or.arrowMarkerAbsolute&&(m=(m=(m=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),d.attr("stroke-width",2),d.attr("stroke","none"),d.style("fill","none"),o!==Un.parser.yy.LINETYPE.SOLID&&o!==Un.parser.yy.LINETYPE.DOTTED||d.attr("marker-end","url("+m+"#arrowhead)"),o!==Un.parser.yy.LINETYPE.SOLID_CROSS&&o!==Un.parser.yy.LINETYPE.DOTTED_CROSS||d.attr("marker-end","url("+m+"#crosshead)"),(ar.showSequenceNumbers()||or.showSequenceNumbers)&&(d.attr("marker-start","url("+m+"#sequencenumber)"),t.append("text").attr("x",n).attr("y",p+4).attr("font-family","sans-serif").attr("font-size","12px").attr("text-anchor","middle").attr("textLength","16px").attr("class","sequenceNumber").text(s)),sr.bumpVerticalPos(g),e.height+=g,e.stopy=e.starty+e.height,sr.insert(e.fromBounds,e.starty,e.toBounds,e.stopy)}(n,a),sr.models.addMessage(a)}catch(t){f.error("error while drawing message",t)}}[Un.parser.yy.LINETYPE.SOLID_OPEN,Un.parser.yy.LINETYPE.DOTTED_OPEN,Un.parser.yy.LINETYPE.SOLID,Un.parser.yy.LINETYPE.DOTTED,Un.parser.yy.LINETYPE.SOLID_CROSS,Un.parser.yy.LINETYPE.DOTTED_CROSS].includes(t.type)&&l++})),or.mirrorActors&&(sr.bumpVerticalPos(2*or.boxMargin),hr(n,r,i,sr.getVerticalPos()));var h=sr.getBounds().bounds;f.debug("For line height fix Querying: #"+e+" .actor-line"),Object(s.selectAll)("#"+e+" .actor-line").attr("y2",h.stopy);var d=h.stopy-h.starty+2*or.diagramMarginY;or.mirrorActors&&(d=d-or.boxMargin+or.bottomMarginAdj);var 
p=h.stopx-h.startx+2*or.diagramMarginX;o&&n.append("text").text(o).attr("x",(h.stopx-h.startx)/2-2*or.diagramMarginX).attr("y",-25),$(n,d,p,or.useMaxWidth);var g=o?40:0;n.attr("viewBox",h.startx-or.diagramMarginX+" -"+(or.diagramMarginY+g)+" "+p+" "+(d+g)),f.debug("models:",sr.models)}},xr=n(27),_r=n.n(xr);function kr(t){return function(t){if(Array.isArray(t)){for(var e=0,n=new Array(t.length);e=6&&n.indexOf("weekends")>=0||(n.indexOf(t.format("dddd").toLowerCase())>=0||n.indexOf(t.format(e.trim()))>=0)},jr=function(t,e,n){if(n.length&&!t.manualEndTime){var r=l()(t.startTime,e,!0);r.add(1,"d");var i=l()(t.endTime,e,!0),a=Rr(r,i,e,n);t.endTime=i.toDate(),t.renderEndTime=a}},Rr=function(t,e,n,r){for(var i=!1,a=null;t<=e;)i||(a=e.toDate()),(i=Ir(t,n,r))&&e.add(1,"d"),t.add(1,"d");return a},Yr=function(t,e,n){n=n.trim();var r=/^after\s+([\d\w- ]+)/.exec(n.trim());if(null!==r){var i=null;if(r[1].split(" ").forEach((function(t){var e=Gr(t);void 0!==e&&(i?e.endTime>i.endTime&&(i=e):i=e)})),i)return i.endTime;var a=new Date;return a.setHours(0,0,0,0),a}var o=l()(n,e.trim(),!0);return o.isValid()?o.toDate():(f.debug("Invalid date:"+n),f.debug("With date format:"+e.trim()),new Date)},zr=function(t,e){if(null!==t)switch(t[2]){case"s":e.add(t[1],"seconds");break;case"m":e.add(t[1],"minutes");break;case"h":e.add(t[1],"hours");break;case"d":e.add(t[1],"days");break;case"w":e.add(t[1],"weeks")}return e.toDate()},Ur=function(t,e,n,r){r=r||!1,n=n.trim();var i=l()(n,e.trim(),!0);return i.isValid()?(r&&i.add(1,"d"),i.toDate()):zr(/^([\d]+)([wdhms])/.exec(n.trim()),l()(t))},$r=0,Wr=function(t){return void 0===t?"task"+($r+=1):t},Vr=[],Hr={},Gr=function(t){var e=Hr[t];return Vr[e]},qr=function(){for(var t=function(t){var e=Vr[t],n="";switch(Vr[t].raw.startTime.type){case"prevTaskEnd":var r=Gr(e.prevTaskId);e.startTime=r.endTime;break;case"getStartDate":(n=Yr(0,Tr,Vr[t].raw.startTime.startData))&&(Vr[t].startTime=n)}return 
Vr[t].startTime&&(Vr[t].endTime=Ur(Vr[t].startTime,Tr,Vr[t].raw.endTime.data,Fr),Vr[t].endTime&&(Vr[t].processed=!0,Vr[t].manualEndTime=l()(Vr[t].raw.endTime.data,"YYYY-MM-DD",!0).isValid(),jr(Vr[t],Tr,Ar))),Vr[t].processed},e=!0,n=0;nr?i=1:n0&&(e=t.classes.join(" "));for(var n=0,r=0;rn-e?n+a+1.5*ti.leftPadding>u?e+r-5:n+r+5:(n-e)/2+e+r})).attr("y",(function(t,r){return t.order*e+ti.barHeight/2+(ti.fontSize/2-2)+n})).attr("text-height",i).attr("class",(function(t){var e=o(t.startTime),n=o(t.endTime);t.milestone&&(n=e+i);var r=this.getBBox().width,a="";t.classes.length>0&&(a=t.classes.join(" "));for(var s=0,l=0;ln-e?n+r+1.5*ti.leftPadding>u?a+" taskTextOutsideLeft taskTextOutside"+s+" "+h:a+" taskTextOutsideRight taskTextOutside"+s+" "+h+" width-"+r:a+" taskText taskText"+s+" "+h+" width-"+r}))}(t,i,u,f,r,0,e),function(t,e){for(var n=[],r=0,i=0;i0&&a.setAttribute("dy","1em"),a.textContent=e[i],r.appendChild(a)}return r})).attr("x",10).attr("y",(function(i,a){if(!(a>0))return i[1]*t/2+e;for(var o=0;o "+t.w+": "+JSON.stringify(i.edge(t))),yn(r,i.edge(t),i.edge(t).relation,oi))}));var h=r.node().getBBox(),d=h.width+40,p=h.height+40;$(r,p,d,oi.useMaxWidth);var g="".concat(h.x-20," ").concat(h.y-20," ").concat(d," ").concat(p);f.debug("viewBox ".concat(g)),r.attr("viewBox",g)};ri.parser.yy=on;var li={dividerMargin:10,padding:5,textHeight:10},hi=function(t){Object.keys(t).forEach((function(e){li[e]=t[e]}))},fi=function(t,e){f.info("Drawing class"),on.clear(),ri.parser.parse(t);var n=xt().flowchart;f.info("config:",n);var r=n.nodeSpacing||50,i=n.rankSpacing||50,a=new H.a.Graph({multigraph:!0,compound:!0}).setGraph({rankdir:"TD",nodesep:r,ranksep:i,marginx:8,marginy:8}).setDefaultEdgeLabel((function(){return{}})),o=on.getClasses(),c=on.getRelations();f.info(c),function(t,e){var n=Object.keys(t);f.info("keys:",n),f.info(t),n.forEach((function(n){var r=t[n],i="";r.cssClasses.length>0&&(i=i+" "+r.cssClasses.join(" "));var a={labelStyle:""},o=void 
0!==r.text?r.text:r.id,s="";switch(r.type){case"class":s="class_box";break;default:s="class_box"}e.setNode(r.id,{labelStyle:a.labelStyle,shape:s,labelText:o,classData:r,rx:0,ry:0,class:i,style:a.style,id:r.id,domId:r.domId,haveCallback:r.haveCallback,link:r.link,width:"group"===r.type?500:void 0,type:r.type,padding:xt().flowchart.padding}),f.info("setNode",{labelStyle:a.labelStyle,shape:s,labelText:o,rx:0,ry:0,class:i,style:a.style,id:r.id,width:"group"===r.type?500:void 0,type:r.type,padding:xt().flowchart.padding})}))}(o,a),function(t,e){var n=0;t.forEach((function(r){n++;var i={classes:"relation"};i.pattern=1==r.relation.lineType?"dashed":"solid",i.id="id"+n,"arrow_open"===r.type?i.arrowhead="none":i.arrowhead="normal",f.info(i,r),i.startLabelRight="none"===r.relationTitle1?"":r.relationTitle1,i.endLabelLeft="none"===r.relationTitle2?"":r.relationTitle2,i.arrowTypeStart=di(r.relation.type1),i.arrowTypeEnd=di(r.relation.type2);var a="",o="";if(void 0!==r.style){var c=N(r.style);a=c.style,o=c.labelStyle}else a="fill:none";i.style=a,i.labelStyle=o,void 0!==r.interpolate?i.curve=O(r.interpolate,s.curveLinear):void 0!==t.defaultInterpolate?i.curve=O(t.defaultInterpolate,s.curveLinear):i.curve=O(li.curve,s.curveLinear),r.text=r.title,void 0===r.text?void 0!==r.style&&(i.arrowheadStyle="fill: #333"):(i.arrowheadStyle="fill: #333",i.labelpos="c",xt().flowchart.htmlLabels,i.labelType="text",i.label=r.text.replace(x.lineBreakRegex,"\n"),void 0===r.style&&(i.style=i.style||"stroke: #333; stroke-width: 1.5px;fill:none"),i.labelStyle=i.labelStyle.replace("color:","fill:")),e.setEdge(r.id1,r.id2,i,n)}))}(c,a);var u=Object(s.select)('[id="'.concat(e,'"]'));u.attr("xmlns:xlink","http://www.w3.org/1999/xlink");var l=Object(s.select)("#"+e+" g");An(l,a,["aggregation","extension","composition","dependency"],"classDiagram",e);var h=u.node().getBBox(),d=h.width+16,p=h.height+16;if(f.debug("new ViewBox 0 0 ".concat(d," ").concat(p),"translate(".concat(8-a._label.marginx,", 
").concat(8-a._label.marginy,")")),$(u,p,d,n.useMaxWidth),u.attr("viewBox","0 0 ".concat(d," ").concat(p)),u.select("g").attr("transform","translate(".concat(8-a._label.marginx,", ").concat(8-h.y,")")),!n.htmlLabels)for(var g=document.querySelectorAll('[id="'+e+'"] .edgeLabel .label'),y=0;y0&&o.length>0){var c={stmt:"state",id:L(),type:"divider",doc:yi(o)};i.push(yi(c)),n.doc=i}n.doc.forEach((function(e){return t(n,e,!0)}))}}({id:"root"},{id:"root",doc:vi},!0),{id:"root",doc:vi}},extract:function(t){var e;e=t.doc?t.doc:t,f.info(e),ki(),f.info("Extract",e),e.forEach((function(t){"state"===t.stmt&&_i(t.id,t.type,t.doc,t.description,t.note),"relation"===t.stmt&&wi(t.state1.id,t.state2.id,t.description)}))},trimColon:function(t){return t&&":"===t[0]?t.substr(1).trim():t.trim()}},Ai=n(22),Mi=n.n(Ai),Oi={},Di=function(t,e){Oi[t]=e},Ni=function(t,e){var n=t.append("text").attr("x",2*xt().state.padding).attr("y",xt().state.textHeight+1.3*xt().state.padding).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.descriptions[0]).node().getBBox(),r=n.height,i=t.append("text").attr("x",xt().state.padding).attr("y",r+.4*xt().state.padding+xt().state.dividerMargin+xt().state.textHeight).attr("class","state-description"),a=!0,o=!0;e.descriptions.forEach((function(t){a||(!function(t,e,n){var r=t.append("tspan").attr("x",2*xt().state.padding).text(e);n||r.attr("dy",xt().state.textHeight)}(i,t,o),o=!1),a=!1}));var s=t.append("line").attr("x1",xt().state.padding).attr("y1",xt().state.padding+r+xt().state.dividerMargin/2).attr("y2",xt().state.padding+r+xt().state.dividerMargin/2).attr("class","descr-divider"),c=i.node().getBBox(),u=Math.max(c.width,n.width);return s.attr("x2",u+3*xt().state.padding),t.insert("rect",":first-child").attr("x",xt().state.padding).attr("y",xt().state.padding).attr("width",u+2*xt().state.padding).attr("height",c.height+r+2*xt().state.padding).attr("rx",xt().state.radius),t},Bi=function(t,e,n){var 
r,i=xt().state.padding,a=2*xt().state.padding,o=t.node().getBBox(),s=o.width,c=o.x,u=t.append("text").attr("x",0).attr("y",xt().state.titleShift).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.id),l=u.node().getBBox().width+a,h=Math.max(l,s);h===s&&(h+=a);var f=t.node().getBBox();e.doc,r=c-i,l>s&&(r=(s-h)/2+i),Math.abs(c-f.x)s&&(r=c-(l-s)/2);var d=1-xt().state.textHeight;return t.insert("rect",":first-child").attr("x",r).attr("y",d).attr("class",n?"alt-composit":"composit").attr("width",h).attr("height",f.height+xt().state.textHeight+xt().state.titleShift+1).attr("rx","0"),u.attr("x",r+i),l<=s&&u.attr("x",c+(h-a)/2-l/2+i),t.insert("rect",":first-child").attr("x",r).attr("y",xt().state.titleShift-xt().state.textHeight-xt().state.padding).attr("width",h).attr("height",3*xt().state.textHeight).attr("rx",xt().state.radius),t.insert("rect",":first-child").attr("x",r).attr("y",xt().state.titleShift-xt().state.textHeight-xt().state.padding).attr("width",h).attr("height",f.height+3+2*xt().state.textHeight).attr("rx",xt().state.radius),t},Li=function(t,e){e.attr("class","state-note");var n=e.append("rect").attr("x",0).attr("y",xt().state.padding),r=function(t,e,n,r){var i=0,a=r.append("text");a.style("text-anchor","start"),a.attr("class","noteText");var o=t.replace(/\r\n/g,"
"),s=(o=o.replace(/\n/g,"
")).split(x.lineBreakRegex),c=1.25*xt().state.noteMargin,u=!0,l=!1,h=void 0;try{for(var f,d=s[Symbol.iterator]();!(u=(f=d.next()).done);u=!0){var p=f.value.trim();if(p.length>0){var g=a.append("tspan");if(g.text(p),0===c)c+=g.node().getBBox().height;i+=c,g.attr("x",e+xt().state.noteMargin),g.attr("y",n+i+1.25*xt().state.noteMargin)}}}catch(t){l=!0,h=t}finally{try{u||null==d.return||d.return()}finally{if(l)throw h}}return{textWidth:a.node().getBBox().width,textHeight:i}}(t,0,0,e.append("g")),i=r.textWidth,a=r.textHeight;return n.attr("height",a+2*xt().state.noteMargin),n.attr("width",i+2*xt().state.noteMargin),n},Fi=function(t,e){var n=e.id,r={id:n,label:e.id,width:0,height:0},i=t.append("g").attr("id",n).attr("class","stateGroup");"start"===e.type&&function(t){t.append("circle").attr("class","start-state").attr("r",xt().state.sizeUnit).attr("cx",xt().state.padding+xt().state.sizeUnit).attr("cy",xt().state.padding+xt().state.sizeUnit)}(i),"end"===e.type&&function(t){t.append("circle").attr("class","end-state-outer").attr("r",xt().state.sizeUnit+xt().state.miniPadding).attr("cx",xt().state.padding+xt().state.sizeUnit+xt().state.miniPadding).attr("cy",xt().state.padding+xt().state.sizeUnit+xt().state.miniPadding),t.append("circle").attr("class","end-state-inner").attr("r",xt().state.sizeUnit).attr("cx",xt().state.padding+xt().state.sizeUnit+2).attr("cy",xt().state.padding+xt().state.sizeUnit+2)}(i),"fork"!==e.type&&"join"!==e.type||function(t,e){var n=xt().state.forkWidth,r=xt().state.forkHeight;if(e.parentId){var 
i=n;n=r,r=i}t.append("rect").style("stroke","black").style("fill","black").attr("width",n).attr("height",r).attr("x",xt().state.padding).attr("y",xt().state.padding)}(i,e),"note"===e.type&&Li(e.note.text,i),"divider"===e.type&&function(t){t.append("line").style("stroke","grey").style("stroke-dasharray","3").attr("x1",xt().state.textHeight).attr("class","divider").attr("x2",2*xt().state.textHeight).attr("y1",0).attr("y2",0)}(i),"default"===e.type&&0===e.descriptions.length&&function(t,e){var n=t.append("text").attr("x",2*xt().state.padding).attr("y",xt().state.textHeight+2*xt().state.padding).attr("font-size",xt().state.fontSize).attr("class","state-title").text(e.id),r=n.node().getBBox();t.insert("rect",":first-child").attr("x",xt().state.padding).attr("y",xt().state.padding).attr("width",r.width+2*xt().state.padding).attr("height",r.height+2*xt().state.padding).attr("rx",xt().state.radius)}(i,e),"default"===e.type&&e.descriptions.length>0&&Ni(i,e);var a=i.node().getBBox();return r.width=a.width+2*xt().state.padding,r.height=a.height+2*xt().state.padding,Di(n,r),r},Pi=0;Ai.parser.yy=Si;var Ii={},ji=function t(e,n,r,i){var a,o=new H.a.Graph({compound:!0,multigraph:!0}),c=!0;for(a=0;a "+t.w+": "+JSON.stringify(o.edge(t))),function(t,e,n){e.points=e.points.filter((function(t){return!Number.isNaN(t.y)}));var r=e.points,i=Object(s.line)().x((function(t){return t.x})).y((function(t){return t.y})).curve(s.curveBasis),a=t.append("path").attr("d",i(r)).attr("id","edge"+Pi).attr("class","transition"),o="";if(xt().state.arrowMarkerAbsolute&&(o=(o=(o=window.location.protocol+"//"+window.location.host+window.location.pathname+window.location.search).replace(/\(/g,"\\(")).replace(/\)/g,"\\)")),a.attr("marker-end","url("+o+"#"+function(t){switch(t){case Si.relationType.AGGREGATION:return"aggregation";case Si.relationType.EXTENSION:return"extension";case Si.relationType.COMPOSITION:return"composition";case 
Si.relationType.DEPENDENCY:return"dependency"}}(Si.relationType.DEPENDENCY)+"End)"),void 0!==n.title){for(var c=t.append("g").attr("class","stateLabel"),u=W.calcLabelPosition(e.points),l=u.x,h=u.y,d=x.getRows(n.title),p=0,g=[],y=0,v=0,m=0;m<=d.length;m++){var b=c.append("text").attr("text-anchor","middle").text(d[m]).attr("x",l).attr("y",h+p),_=b.node().getBBox();if(y=Math.max(y,_.width),v=Math.min(v,_.x),f.info(_.x,l,h+p),0===p){var k=b.node().getBBox();p=k.height,f.info("Title height",p,h)}g.push(b)}var w=p*d.length;if(d.length>1){var E=(d.length-1)*p*.5;g.forEach((function(t,e){return t.attr("y",h+e*p-E)})),w=p*d.length}var T=c.node().getBBox();c.insert("rect",":first-child").attr("class","box").attr("x",l-y/2-xt().state.padding/2).attr("y",h-w/2-xt().state.padding/2-3.5).attr("width",y+xt().state.padding).attr("height",w+xt().state.padding),f.info(T)}Pi++}(n,o.edge(t),o.edge(t).relation))})),w=k.getBBox();var E={id:r||"root",label:r||"root",width:0,height:0};return E.width=w.width+2*gi.padding,E.height=w.height+2*gi.padding,f.debug("Doc rendered",E,o),E},Ri=function(){},Yi=function(t,e){gi=xt().state,Ai.parser.yy.clear(),Ai.parser.parse(t),f.debug("Rendering diagram "+t);var n=Object(s.select)("[id='".concat(e,"']"));n.append("defs").append("marker").attr("id","dependencyEnd").attr("refX",19).attr("refY",7).attr("markerWidth",20).attr("markerHeight",28).attr("orient","auto").append("path").attr("d","M 19,7 L9,13 L14,7 L9,1 Z"),new H.a.Graph({multigraph:!0,compound:!0,rankdir:"RL"}).setDefaultEdgeLabel((function(){return{}}));var r=Si.getRootDoc();ji(r,n,void 0,!1);var i=gi.padding,a=n.node().getBBox(),o=a.width+2*i,c=a.height+2*i;$(n,c,1.75*o,gi.useMaxWidth),n.attr("viewBox","".concat(a.x-gi.padding," ").concat(a.y-gi.padding," ")+o+" "+c)},zi={},Ui={},$i=function(t,e,n,r){if("root"!==n.id){var 
i="rect";!0===n.start&&(i="start"),!1===n.start&&(i="end"),"default"!==n.type&&(i=n.type),Ui[n.id]||(Ui[n.id]={id:n.id,shape:i,description:n.id,classes:"statediagram-state"}),n.description&&(Array.isArray(Ui[n.id].description)?(Ui[n.id].shape="rectWithTitle",Ui[n.id].description.push(n.description)):Ui[n.id].description.length>0?(Ui[n.id].shape="rectWithTitle",Ui[n.id].description===n.id?Ui[n.id].description=[n.description]:Ui[n.id].description=[Ui[n.id].description,n.description]):(Ui[n.id].shape="rect",Ui[n.id].description=n.description)),!Ui[n.id].type&&n.doc&&(f.info("Setting cluser for ",n.id),Ui[n.id].type="group",Ui[n.id].shape="divider"===n.type?"divider":"roundedWithTitle",Ui[n.id].classes=Ui[n.id].classes+" "+(r?"statediagram-cluster statediagram-cluster-alt":"statediagram-cluster"));var a={labelStyle:"",shape:Ui[n.id].shape,labelText:Ui[n.id].description,classes:Ui[n.id].classes,style:"",id:n.id,domId:"state-"+n.id+"-"+Wi,type:Ui[n.id].type,padding:15};if(n.note){var o={labelStyle:"",shape:"note",labelText:n.note.text,classes:"statediagram-note",style:"",id:n.id+"----note",domId:"state-"+n.id+"----note-"+Wi,type:Ui[n.id].type,padding:15},s={labelStyle:"",shape:"noteGroup",labelText:n.note.text,classes:Ui[n.id].classes,style:"",id:n.id+"----parent",domId:"state-"+n.id+"----parent-"+Wi,type:"group",padding:0};Wi++,t.setNode(n.id+"----parent",s),t.setNode(o.id,o),t.setNode(n.id,a),t.setParent(n.id,n.id+"----parent"),t.setParent(o.id,n.id+"----parent");var c=n.id,u=o.id;"left of"===n.note.position&&(c=o.id,u=n.id),t.setEdge(c,u,{arrowhead:"none",arrowType:"",style:"fill:none",labelStyle:"",classes:"transition note-edge",arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal"})}else t.setNode(n.id,a)}e&&"root"!==e.id&&(f.info("Setting node ",n.id," to be child of its parent ",e.id),t.setParent(n.id,e.id)),n.doc&&(f.info("Adding nodes children 
"),Vi(t,n,n.doc,!r))},Wi=0,Vi=function(t,e,n,r){Wi=0,f.trace("items",n),n.forEach((function(n){if("state"===n.stmt||"default"===n.stmt)$i(t,e,n,r);else if("relation"===n.stmt){$i(t,e,n.state1,r),$i(t,e,n.state2,r);var i={id:"edge"+Wi,arrowhead:"normal",arrowTypeEnd:"arrow_barb",style:"fill:none",labelStyle:"",label:n.description,arrowheadStyle:"fill: #333",labelpos:"c",labelType:"text",thickness:"normal",classes:"transition"},a=n.state1.id,o=n.state2.id;t.setEdge(a,o,i,Wi),Wi++}}))},Hi=function(t){for(var e=Object.keys(t),n=0;ne.seq?t:e}),t[0]),n="";t.forEach((function(t){n+=t===e?"\t*":"\t|"}));var r,i,a,o=[n,e.id,e.seq];for(var s in Zi)Zi[s]===e.id&&o.push(s);if(f.debug(o.join(" ")),Array.isArray(e.parent)){var c=qi[e.parent[0]];ra(t,e,c),t.push(qi[e.parent[1]])}else{if(null==e.parent)return;var u=qi[e.parent];ra(t,e,u)}r=t,i=function(t){return t.id},a=Object.create(null),ia(t=r.reduce((function(t,e){var n=i(e);return a[n]||(a[n]=!0,t.push(e)),t}),[]))}var aa,oa=function(){var t=Object.keys(qi).map((function(t){return qi[t]}));return t.forEach((function(t){f.debug(t.id)})),t.sort((function(t,e){return e.seq-t.seq})),t},sa={setDirection:function(t){Qi=t},setOptions:function(t){f.debug("options str",t),t=(t=t&&t.trim())||"{}";try{na=JSON.parse(t)}catch(t){f.error("error while parsing gitGraph options",t.message)}},getOptions:function(){return na},commit:function(t){var e={id:ta(),message:t,seq:Ki++,parent:null==Xi?null:Xi.id};Xi=e,qi[e.id]=e,Zi[Ji]=e.id,f.debug("in pushCommit "+e.id)},branch:function(t){Zi[t]=null!=Xi?Xi.id:null,f.debug("in createBranch")},merge:function(t){var e=qi[Zi[Ji]],n=qi[Zi[t]];if(function(t,e){return t.seq>e.seq&&ea(e,t)}(e,n))f.debug("Already merged");else{if(ea(e,n))Zi[Ji]=Zi[t],Xi=qi[Zi[Ji]];else{var r={id:ta(),message:"merged branch "+t+" into "+Ji,seq:Ki++,parent:[null==Xi?null:Xi.id,Zi[t]]};Xi=r,qi[r.id]=r,Zi[Ji]=r.id}f.debug(Zi),f.debug("in mergeBranch")}},checkout:function(t){f.debug("in checkout");var 
e=Zi[Ji=t];Xi=qi[e]},reset:function(t){f.debug("in reset",t);var e=t.split(":")[0],n=parseInt(t.split(":")[1]),r="HEAD"===e?Xi:qi[Zi[e]];for(f.debug(r,n);n>0;)if(n--,!(r=qi[r.parent])){var i="Critical error - unique parent commit not found during reset";throw f.error(i),i}Xi=r,Zi[Ji]=r.id},prettyPrint:function(){f.debug(qi),ia([oa()[0]])},clear:function(){qi={},Zi={master:Xi=null},Ji="master",Ki=0},getBranchesAsObjArray:function(){var t=[];for(var e in Zi)t.push({name:e,commit:qi[Zi[e]]});return t},getBranches:function(){return Zi},getCommits:function(){return qi},getCommitsArray:oa,getCurrentBranch:function(){return Ji},getDirection:function(){return Qi},getHead:function(){return Xi}},ca=n(71),ua=n.n(ca),la={},ha={nodeSpacing:150,nodeFillColor:"yellow",nodeStrokeWidth:2,nodeStrokeColor:"grey",lineStrokeWidth:4,branchOffset:50,lineColor:"grey",leftMargin:50,branchColors:["#442f74","#983351","#609732","#AA9A39"],nodeRadius:10,nodeLabel:{width:75,height:100,x:-25,y:0}},fa={};function da(t,e,n,r){var i=O(r,s.curveBasis),a=ha.branchColors[n%ha.branchColors.length],o=Object(s.line)().x((function(t){return Math.round(t.x)})).y((function(t){return Math.round(t.y)})).curve(i);t.append("svg:path").attr("d",o(e)).style("stroke",a).style("stroke-width",ha.lineStrokeWidth).style("fill","none")}function pa(t,e){e=e||t.node().getBBox();var n=t.node().getCTM();return{left:n.e+e.x*n.a,top:n.f+e.y*n.d,width:e.width,height:e.height}}function ga(t,e,n,r,i){f.debug("svgDrawLineForCommits: ",e,n);var a=pa(t.select("#node-"+e+" circle")),o=pa(t.select("#node-"+n+" circle"));switch(r){case"LR":if(a.left-o.left>ha.nodeSpacing){var s={x:a.left-ha.nodeSpacing,y:o.top+o.height/2};da(t,[s,{x:o.left+o.width,y:o.top+o.height/2}],i,"linear"),da(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:s.y},s],i)}else 
da(t,[{x:a.left,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:a.top+a.height/2},{x:a.left-ha.nodeSpacing/2,y:o.top+o.height/2},{x:o.left+o.width,y:o.top+o.height/2}],i);break;case"BT":if(o.top-a.top>ha.nodeSpacing){var c={x:o.left+o.width/2,y:a.top+a.height+ha.nodeSpacing};da(t,[c,{x:o.left+o.width/2,y:o.top}],i,"linear"),da(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+a.height+ha.nodeSpacing/2},{x:o.left+o.width/2,y:c.y-ha.nodeSpacing/2},c],i)}else da(t,[{x:a.left+a.width/2,y:a.top+a.height},{x:a.left+a.width/2,y:a.top+ha.nodeSpacing/2},{x:o.left+o.width/2,y:o.top-ha.nodeSpacing/2},{x:o.left+o.width/2,y:o.top}],i)}}function ya(t,e){return t.select(e).node().cloneNode(!0)}function va(t,e,n,r){var i,a=Object.keys(la).length;if("string"==typeof e)do{if(i=la[e],f.debug("in renderCommitHistory",i.id,i.seq),t.select("#node-"+e).size()>0)return;t.append((function(){return ya(t,"#def-commit")})).attr("class","commit").attr("id",(function(){return"node-"+i.id})).attr("transform",(function(){switch(r){case"LR":return"translate("+(i.seq*ha.nodeSpacing+ha.leftMargin)+", "+aa*ha.branchOffset+")";case"BT":return"translate("+(aa*ha.branchOffset+ha.leftMargin)+", "+(a-i.seq)*ha.nodeSpacing+")"}})).attr("fill",ha.nodeFillColor).attr("stroke",ha.nodeStrokeColor).attr("stroke-width",ha.nodeStrokeWidth);var o=void 0;for(var s in n)if(n[s].commit===i){o=n[s];break}o&&(f.debug("found branch ",o.name),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","branch-label").text(o.name+", ")),t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-id").text(i.id),""!==i.message&&"BT"===r&&t.select("#node-"+i.id+" p").append("xhtml:span").attr("class","commit-msg").text(", "+i.message),e=i.parent}while(e&&la[e]);Array.isArray(e)&&(f.debug("found merge commmit",e),va(t,e[0],n,r),aa++,va(t,e[1],n,r),aa--)}function ma(t,e,n,r){for(r=r||0;e.seq>0&&!e.lineDrawn;)"string"==typeof 
e.parent?(ga(t,e.id,e.parent,n,r),e.lineDrawn=!0,e=la[e.parent]):Array.isArray(e.parent)&&(ga(t,e.id,e.parent[0],n,r),ga(t,e.id,e.parent[1],n,r+1),ma(t,la[e.parent[1]],n,r+1),e.lineDrawn=!0,e=la[e.parent[0]])}var ba,xa=function(t){fa=t},_a=function(t,e,n){try{var r=ua.a.parser;r.yy=sa,r.yy.clear(),f.debug("in gitgraph renderer",t+"\n","id:",e,n),r.parse(t+"\n"),ha=Object.assign(ha,fa,sa.getOptions()),f.debug("effective options",ha);var i=sa.getDirection();la=sa.getCommits();var a=sa.getBranchesAsObjArray();"BT"===i&&(ha.nodeLabel.x=a.length*ha.branchOffset,ha.nodeLabel.width="100%",ha.nodeLabel.y=-2*ha.nodeRadius);var o=Object(s.select)('[id="'.concat(e,'"]'));for(var c in function(t){t.append("defs").append("g").attr("id","def-commit").append("circle").attr("r",ha.nodeRadius).attr("cx",0).attr("cy",0),t.select("#def-commit").append("foreignObject").attr("width",ha.nodeLabel.width).attr("height",ha.nodeLabel.height).attr("x",ha.nodeLabel.x).attr("y",ha.nodeLabel.y).attr("class","node-label").attr("requiredFeatures","http://www.w3.org/TR/SVG11/feature#Extensibility").append("p").html("")}(o),aa=1,a){var u=a[c];va(o,u.commit.id,a,i),ma(o,u.commit,i),aa++}o.attr("height",(function(){return"BT"===i?Object.keys(la).length*ha.nodeSpacing:(a.length+1)*ha.branchOffset}))}catch(t){f.error("Error while rendering gitgraph"),f.error(t.message)}},ka="",wa=!1,Ea={setMessage:function(t){f.debug("Setting message to: "+t),ka=t},getMessage:function(){return ka},setInfo:function(t){wa=t},getInfo:function(){return wa}},Ta=n(72),Ca=n.n(Ta),Sa={},Aa=function(t){Object.keys(t).forEach((function(e){Sa[e]=t[e]}))},Ma=function(t,e,n){try{var r=Ca.a.parser;r.yy=Ea,f.debug("Renering info diagram\n"+t),r.parse(t),f.debug("Parsed info diagram");var i=Object(s.select)("#"+e);i.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size","32px").style("text-anchor","middle").text("v "+n),i.attr("height",100),i.attr("width",400)}catch(t){f.error("Error while 
rendering info diagram"),f.error(t.message)}},Oa={},Da=function(t){Object.keys(t).forEach((function(e){Oa[e]=t[e]}))},Na=function(t,e){try{f.debug("Renering svg for syntax error\n");var n=Object(s.select)("#"+t),r=n.append("g");r.append("path").attr("class","error-icon").attr("d","m411.313,123.313c6.25-6.25 6.25-16.375 0-22.625s-16.375-6.25-22.625,0l-32,32-9.375,9.375-20.688-20.688c-12.484-12.5-32.766-12.5-45.25,0l-16,16c-1.261,1.261-2.304,2.648-3.31,4.051-21.739-8.561-45.324-13.426-70.065-13.426-105.867,0-192,86.133-192,192s86.133,192 192,192 192-86.133 192-192c0-24.741-4.864-48.327-13.426-70.065 1.402-1.007 2.79-2.049 4.051-3.31l16-16c12.5-12.492 12.5-32.758 0-45.25l-20.688-20.688 9.375-9.375 32.001-31.999zm-219.313,100.687c-52.938,0-96,43.063-96,96 0,8.836-7.164,16-16,16s-16-7.164-16-16c0-70.578 57.422-128 128-128 8.836,0 16,7.164 16,16s-7.164,16-16,16z"),r.append("path").attr("class","error-icon").attr("d","m459.02,148.98c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l16,16c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16.001-16z"),r.append("path").attr("class","error-icon").attr("d","m340.395,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688 6.25-6.25 6.25-16.375 0-22.625l-16-16c-6.25-6.25-16.375-6.25-22.625,0s-6.25,16.375 0,22.625l15.999,16z"),r.append("path").attr("class","error-icon").attr("d","m400,64c8.844,0 16-7.164 16-16v-32c0-8.836-7.156-16-16-16-8.844,0-16,7.164-16,16v32c0,8.836 7.156,16 16,16z"),r.append("path").attr("class","error-icon").attr("d","m496,96.586h-32c-8.844,0-16,7.164-16,16 0,8.836 7.156,16 16,16h32c8.844,0 16-7.164 16-16 0-8.836-7.156-16-16-16z"),r.append("path").attr("class","error-icon").attr("d","m436.98,75.605c3.125,3.125 7.219,4.688 11.313,4.688 4.094,0 8.188-1.563 11.313-4.688l32-32c6.25-6.25 6.25-16.375 
0-22.625s-16.375-6.25-22.625,0l-32,32c-6.251,6.25-6.251,16.375-0.001,22.625z"),r.append("text").attr("class","error-text").attr("x",1240).attr("y",250).attr("font-size","150px").style("text-anchor","middle").text("Syntax error in graph"),r.append("text").attr("class","error-text").attr("x",1050).attr("y",400).attr("font-size","100px").style("text-anchor","middle").text("mermaid version "+e),n.attr("height",100),n.attr("width",400),n.attr("viewBox","768 0 512 512")}catch(t){f.error("Error while rendering info diagram"),f.error(t.message)}},Ba={},La="",Fa={parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().pie},addSection:function(t,e){void 0===Ba[t]&&(Ba[t]=e,f.debug("Added new section :",t))},getSections:function(){return Ba},cleanupValue:function(t){return":"===t.substring(0,1)?(t=t.substring(1).trim(),Number(t.trim())):Number(t.trim())},clear:function(){Ba={},La=""},setTitle:function(t){La=t},getTitle:function(){return La}},Pa=n(73),Ia=n.n(Pa),ja={},Ra=function(t){Object.keys(t).forEach((function(e){ja[e]=t[e]}))},Ya=function(t,e){try{var n=Ia.a.parser;n.yy=Fa,f.debug("Rendering info diagram\n"+t),n.yy.clear(),n.parse(t),f.debug("Parsed info diagram");var r=document.getElementById(e);void 0===(ba=r.parentElement.offsetWidth)&&(ba=1200),void 0!==ja.useWidth&&(ba=ja.useWidth);var i=Object(s.select)("#"+e);$(i,450,ba,ja.useMaxWidth),r.setAttribute("viewBox","0 0 "+ba+" 450");var a=Math.min(ba,450)/2-40,o=i.append("g").attr("transform","translate("+ba/2+",225)"),c=Fa.getSections(),u=0;Object.keys(c).forEach((function(t){u+=c[t]}));var l=Object(s.scaleOrdinal)().domain(c).range(s.schemeSet2),h=Object(s.pie)().value((function(t){return t.value}))(Object(s.entries)(c)),d=Object(s.arc)().innerRadius(0).outerRadius(a);o.selectAll("mySlices").data(h).enter().append("path").attr("d",d).attr("fill",(function(t){return 
l(t.data.key)})).attr("stroke","black").style("stroke-width","2px").style("opacity",.7),o.selectAll("mySlices").data(h).enter().append("text").text((function(t){return(t.data.value/u*100).toFixed(0)+"%"})).attr("transform",(function(t){return"translate("+d.centroid(t)+")"})).style("text-anchor","middle").attr("class","slice").style("font-size",17),o.append("text").text(n.yy.getTitle()).attr("x",0).attr("y",-200).attr("class","pieTitleText");var p=o.selectAll(".legend").data(l.domain()).enter().append("g").attr("class","legend").attr("transform",(function(t,e){return"translate(216,"+(22*e-22*l.domain().length/2)+")"}));p.append("rect").attr("width",18).attr("height",18).style("fill",l).style("stroke",l),p.append("text").attr("x",22).attr("y",14).text((function(t){return t}))}catch(t){f.error("Error while rendering info diagram"),f.error(t)}},za={},Ua=[],$a="",Wa={Cardinality:{ZERO_OR_ONE:"ZERO_OR_ONE",ZERO_OR_MORE:"ZERO_OR_MORE",ONE_OR_MORE:"ONE_OR_MORE",ONLY_ONE:"ONLY_ONE"},Identification:{NON_IDENTIFYING:"NON_IDENTIFYING",IDENTIFYING:"IDENTIFYING"},parseDirective:function(t,e,n){$o.parseDirective(this,t,e,n)},getConfig:function(){return xt().er},addEntity:function(t){void 0===za[t]&&(za[t]=t,f.debug("Added new entity :",t))},getEntities:function(){return za},addRelationship:function(t,e,n,r){var i={entityA:t,roleA:e,entityB:n,relSpec:r};Ua.push(i),f.debug("Added new relationship :",i)},getRelationships:function(){return Ua},clear:function(){za={},Ua=[],$a=""},setTitle:function(t){$a=t},getTitle:function(){return $a}},Va=n(74),Ha=n.n(Va),Ga={ONLY_ONE_START:"ONLY_ONE_START",ONLY_ONE_END:"ONLY_ONE_END",ZERO_OR_ONE_START:"ZERO_OR_ONE_START",ZERO_OR_ONE_END:"ZERO_OR_ONE_END",ONE_OR_MORE_START:"ONE_OR_MORE_START",ONE_OR_MORE_END:"ONE_OR_MORE_END",ZERO_OR_MORE_START:"ZERO_OR_MORE_START",ZERO_OR_MORE_END:"ZERO_OR_MORE_END"},qa=Ga,Xa=function(t,e){var 
n;t.append("defs").append("marker").attr("id",Ga.ONLY_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18 M15,0 L15,18"),t.append("defs").append("marker").attr("id",Ga.ONLY_ONE_END).attr("refX",18).attr("refY",9).attr("markerWidth",18).attr("markerHeight",18).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,0 L3,18 M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_ONE_START).attr("refX",0).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",21).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M9,0 L9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_ONE_END).attr("refX",30).attr("refY",9).attr("markerWidth",30).attr("markerHeight",18).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",9).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,0 L21,18"),t.append("defs").append("marker").attr("id",Ga.ONE_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q 18,0 36,18 Q 18,36 0,18 M42,9 L42,27"),t.append("defs").append("marker").attr("id",Ga.ONE_OR_MORE_END).attr("refX",27).attr("refY",18).attr("markerWidth",45).attr("markerHeight",36).attr("orient","auto").append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M3,9 L3,27 M9,18 Q27,0 45,18 Q27,36 
9,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_MORE_START).attr("refX",18).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",48).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M0,18 Q18,0 36,18 Q18,36 0,18"),(n=t.append("defs").append("marker").attr("id",Ga.ZERO_OR_MORE_END).attr("refX",39).attr("refY",18).attr("markerWidth",57).attr("markerHeight",36).attr("orient","auto")).append("circle").attr("stroke",e.stroke).attr("fill","white").attr("cx",9).attr("cy",18).attr("r",6),n.append("path").attr("stroke",e.stroke).attr("fill","none").attr("d","M21,18 Q39,0 57,18 Q39,36 21,18")},Za={},Ja=function(t){return(t.entityA+t.roleA+t.entityB).replace(/\s/g,"")},Qa=0,Ka=function(t){for(var e=Object.keys(t),n=0;n/gi," "),r=t.append("text");r.attr("x",e.x),r.attr("y",e.y),r.attr("class","legend"),r.style("text-anchor",e.anchor),void 0!==e.class&&r.attr("class",e.class);var i=r.append("tspan");return i.attr("x",e.x+2*e.textMargin),i.text(n),r},go=-1,yo=function(){return{x:0,y:0,width:100,anchor:"start",height:100,rx:0,ry:0}},vo=function(){function t(t,e,n,i,a,o,s,c){r(e.append("text").attr("x",n+a/2).attr("y",i+o/2+5).style("font-color",c).style("text-anchor","middle").text(t),s)}function e(t,e,n,i,a,o,s,c,u){for(var l=c.taskFontSize,h=c.taskFontFamily,f=t.split(//gi),d=0;d3?function(t){var e=Object(s.arc)().startAngle(Math.PI/2).endAngle(Math.PI/2*3).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+2)+")")}(c):o.score<3?function(t){var 
e=Object(s.arc)().startAngle(3*Math.PI/2).endAngle(Math.PI/2*5).innerRadius(7.5).outerRadius(15/2.2);t.append("path").attr("class","mouth").attr("d",e).attr("transform","translate("+o.cx+","+(o.cy+7)+")")}(c):function(t){t.append("line").attr("class","mouth").attr("stroke",2).attr("x1",o.cx-5).attr("y1",o.cy+7).attr("x2",o.cx+5).attr("y2",o.cy+7).attr("class","mouth").attr("stroke-width","1px").attr("stroke","#666")}(c);var u=yo();u.x=e.x,u.y=e.y,u.fill=e.fill,u.width=n.width,u.height=n.height,u.class="task task-type-"+e.num,u.rx=3,u.ry=3,ho(i,u);var l=e.x+14;e.people.forEach((function(t){var n=e.actors[t],r={cx:l,cy:e.y,r:7,fill:n,stroke:"#000",title:t};fo(i,r),l+=10})),vo(n)(e.task,i,u.x,u.y,u.width,u.height,{class:"task"},n,e.colour)},ko=function(t){t.append("defs").append("marker").attr("id","arrowhead").attr("refX",5).attr("refY",2).attr("markerWidth",6).attr("markerHeight",4).attr("orient","auto").append("path").attr("d","M 0,0 V 4 L6,2 Z")};eo.parser.yy=lo;var wo={leftMargin:150,diagramMarginX:50,diagramMarginY:20,taskMargin:50,width:150,height:50,taskFontSize:14,taskFontFamily:'"Open-Sans", "sans-serif"',boxMargin:10,boxTextMargin:5,noteMargin:10,messageMargin:35,messageAlign:"center",bottomMarginAdj:1,activationWidth:10,textPlacement:"fo",actorColours:["#8FBC8F","#7CFC00","#00FFFF","#20B2AA","#B0E0E6","#FFFFE0"],sectionFills:["#191970","#8B008B","#4B0082","#2F4F4F","#800000","#8B4513","#00008B"],sectionColours:["#fff"]},Eo={};var To=wo.leftMargin,Co={data:{startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},verticalPos:0,sequenceItems:[],init:function(){this.sequenceItems=[],this.data={startx:void 0,stopx:void 0,starty:void 0,stopy:void 0},this.verticalPos=0},updateVal:function(t,e,n,r){void 0===t[e]?t[e]=n:t[e]=r(n,t[e])},updateBounds:function(t,e,n,r){var i,a=this,o=0;this.sequenceItems.forEach((function(s){o++;var 
c=a.sequenceItems.length-o+1;a.updateVal(s,"starty",e-c*wo.boxMargin,Math.min),a.updateVal(s,"stopy",r+c*wo.boxMargin,Math.max),a.updateVal(Co.data,"startx",t-c*wo.boxMargin,Math.min),a.updateVal(Co.data,"stopx",n+c*wo.boxMargin,Math.max),"activation"!==i&&(a.updateVal(s,"startx",t-c*wo.boxMargin,Math.min),a.updateVal(s,"stopx",n+c*wo.boxMargin,Math.max),a.updateVal(Co.data,"starty",e-c*wo.boxMargin,Math.min),a.updateVal(Co.data,"stopy",r+c*wo.boxMargin,Math.max))}))},insert:function(t,e,n,r){var i=Math.min(t,n),a=Math.max(t,n),o=Math.min(e,r),s=Math.max(e,r);this.updateVal(Co.data,"startx",i,Math.min),this.updateVal(Co.data,"starty",o,Math.min),this.updateVal(Co.data,"stopx",a,Math.max),this.updateVal(Co.data,"stopy",s,Math.max),this.updateBounds(i,o,a,s)},bumpVerticalPos:function(t){this.verticalPos=this.verticalPos+t,this.data.stopy=this.verticalPos},getVerticalPos:function(){return this.verticalPos},getBounds:function(){return this.data}},So=wo.sectionFills,Ao=wo.sectionColours,Mo=function(t,e,n){for(var r="",i=n+(2*wo.height+wo.diagramMarginY),a=0,o="#CCC",s="black",c=0,u=0;u tspan {\n fill: ").concat(t.actorTextColor,";\n stroke: none;\n }\n\n .actor-line {\n stroke: ").concat(t.actorLineColor,";\n }\n\n .messageLine0 {\n stroke-width: 1.5;\n stroke-dasharray: none;\n stroke: ").concat(t.signalColor,";\n }\n\n .messageLine1 {\n stroke-width: 1.5;\n stroke-dasharray: 2, 2;\n stroke: ").concat(t.signalColor,";\n }\n\n #arrowhead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .sequenceNumber {\n fill: ").concat(t.sequenceNumberColor,";\n }\n\n #sequencenumber {\n fill: ").concat(t.signalColor,";\n }\n\n #crosshead path {\n fill: ").concat(t.signalColor,";\n stroke: ").concat(t.signalColor,";\n }\n\n .messageText {\n fill: ").concat(t.signalTextColor,";\n stroke: ").concat(t.signalTextColor,";\n }\n\n .labelBox {\n stroke: ").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBkgColor,";\n }\n\n .labelText, 
.labelText > tspan {\n fill: ").concat(t.labelTextColor,";\n stroke: none;\n }\n\n .loopText, .loopText > tspan {\n fill: ").concat(t.loopTextColor,";\n stroke: none;\n }\n\n .loopLine {\n stroke-width: 2px;\n stroke-dasharray: 2, 2;\n stroke: ").concat(t.labelBoxBorderColor,";\n fill: ").concat(t.labelBoxBorderColor,";\n }\n\n .note {\n //stroke: #decc93;\n stroke: ").concat(t.noteBorderColor,";\n fill: ").concat(t.noteBkgColor,";\n }\n\n .noteText, .noteText > tspan {\n fill: ").concat(t.noteTextColor,";\n stroke: none;\n }\n\n .activation0 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation1 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n\n .activation2 {\n fill: ").concat(t.activationBkgColor,";\n stroke: ").concat(t.activationBorderColor,";\n }\n")},gantt:function(t){return'\n .mermaid-main-font {\n font-family: "trebuchet ms", verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n\n .section {\n stroke: none;\n opacity: 0.2;\n }\n\n .section0 {\n fill: '.concat(t.sectionBkgColor,";\n }\n\n .section2 {\n fill: ").concat(t.sectionBkgColor2,";\n }\n\n .section1,\n .section3 {\n fill: ").concat(t.altSectionBkgColor,";\n opacity: 0.2;\n }\n\n .sectionTitle0 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle1 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle2 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle3 {\n fill: ").concat(t.titleColor,";\n }\n\n .sectionTitle {\n text-anchor: start;\n font-size: 11px;\n text-height: 14px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n\n }\n\n\n /* Grid and axis */\n\n .grid .tick {\n stroke: ").concat(t.gridColor,";\n opacity: 0.8;\n shape-rendering: crispEdges;\n text {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n }\n }\n\n .grid path {\n stroke-width: 0;\n }\n\n\n /* Today line */\n\n .today {\n fill: none;\n 
stroke: ").concat(t.todayLineColor,";\n stroke-width: 2px;\n }\n\n\n /* Task styling */\n\n /* Default task */\n\n .task {\n stroke-width: 2;\n }\n\n .taskText {\n text-anchor: middle;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n\n .taskText:not([font-size]) {\n font-size: 11px;\n }\n\n .taskTextOutsideRight {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: start;\n font-size: 11px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n\n }\n\n .taskTextOutsideLeft {\n fill: ").concat(t.taskTextDarkColor,";\n text-anchor: end;\n font-size: 11px;\n }\n\n /* Special case clickable */\n .task.clickable {\n cursor: pointer;\n }\n .taskText.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideLeft.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n .taskTextOutsideRight.clickable {\n cursor: pointer;\n fill: ").concat(t.taskTextClickableColor," !important;\n font-weight: bold;\n }\n\n /* Specific task settings for the sections*/\n\n .taskText0,\n .taskText1,\n .taskText2,\n .taskText3 {\n fill: ").concat(t.taskTextColor,";\n }\n\n .task0,\n .task1,\n .task2,\n .task3 {\n fill: ").concat(t.taskBkgColor,";\n stroke: ").concat(t.taskBorderColor,";\n }\n\n .taskTextOutside0,\n .taskTextOutside2\n {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n .taskTextOutside1,\n .taskTextOutside3 {\n fill: ").concat(t.taskTextOutsideColor,";\n }\n\n\n /* Active task */\n\n .active0,\n .active1,\n .active2,\n .active3 {\n fill: ").concat(t.activeTaskBkgColor,";\n stroke: ").concat(t.activeTaskBorderColor,";\n }\n\n .activeText0,\n .activeText1,\n .activeText2,\n .activeText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Completed task */\n\n .done0,\n .done1,\n .done2,\n .done3 {\n stroke: ").concat(t.doneTaskBorderColor,";\n 
fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneText0,\n .doneText1,\n .doneText2,\n .doneText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n\n /* Tasks on the critical line */\n\n .crit0,\n .crit1,\n .crit2,\n .crit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.critBkgColor,";\n stroke-width: 2;\n }\n\n .activeCrit0,\n .activeCrit1,\n .activeCrit2,\n .activeCrit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.activeTaskBkgColor,";\n stroke-width: 2;\n }\n\n .doneCrit0,\n .doneCrit1,\n .doneCrit2,\n .doneCrit3 {\n stroke: ").concat(t.critBorderColor,";\n fill: ").concat(t.doneTaskBkgColor,";\n stroke-width: 2;\n cursor: pointer;\n shape-rendering: crispEdges;\n }\n\n .milestone {\n transform: rotate(45deg) scale(0.8,0.8);\n }\n\n .milestoneText {\n font-style: italic;\n }\n .doneCritText0,\n .doneCritText1,\n .doneCritText2,\n .doneCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .activeCritText0,\n .activeCritText1,\n .activeCritText2,\n .activeCritText3 {\n fill: ").concat(t.taskTextDarkColor," !important;\n }\n\n .titleText {\n text-anchor: middle;\n font-size: 18px;\n fill: ").concat(t.textColor," ;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n")},classDiagram:No,"classDiagram-v2":No,class:No,stateDiagram:Lo,state:Lo,git:function(){return"\n .commit-id,\n .commit-msg,\n .branch-label {\n fill: lightgrey;\n color: lightgrey;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n }\n"},info:function(){return""},pie:function(t){return".pieTitleText {\n text-anchor: middle;\n font-size: 25px;\n fill: ".concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n }\n .slice {\n font-family: ").concat(t.fontFamily,";\n fill: ").concat(t.textColor,";\n // fill: white;\n }\n .legend text {\n fill: ").concat(t.taskTextDarkColor,";\n font-family: ").concat(t.fontFamily,";\n 
font-size: 17px;\n }\n")},er:function(t){return"\n .entityBox {\n fill: ".concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n }\n\n .relationshipLabelBox {\n fill: ").concat(t.tertiaryColor,";\n opacity: 0.7;\n background-color: ").concat(t.tertiaryColor,";\n rect {\n opacity: 0.5;\n }\n }\n\n .relationshipLine {\n stroke: ").concat(t.lineColor,";\n }\n")},journey:function(t){return".label {\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n color: ".concat(t.textColor,";\n }\n .mouth {\n stroke: #666;\n }\n\n line {\n stroke: ").concat(t.textColor,"\n }\n\n .legend {\n fill: ").concat(t.textColor,";\n }\n\n .label text {\n fill: #333;\n }\n .label {\n color: ").concat(t.textColor,"\n }\n\n .face {\n fill: #FFF8DC;\n stroke: #999;\n }\n\n .node rect,\n .node circle,\n .node ellipse,\n .node polygon,\n .node path {\n fill: ").concat(t.mainBkg,";\n stroke: ").concat(t.nodeBorder,";\n stroke-width: 1px;\n }\n\n .node .label {\n text-align: center;\n }\n .node.clickable {\n cursor: pointer;\n }\n\n .arrowheadPath {\n fill: ").concat(t.arrowheadColor,";\n }\n\n .edgePath .path {\n stroke: ").concat(t.lineColor,";\n stroke-width: 1.5px;\n }\n\n .flowchart-link {\n stroke: ").concat(t.lineColor,";\n fill: none;\n }\n\n .edgeLabel {\n background-color: ").concat(t.edgeLabelBackground,";\n rect {\n opacity: 0.5;\n }\n text-align: center;\n }\n\n .cluster rect {\n }\n\n .cluster text {\n fill: ").concat(t.titleColor,";\n }\n\n div.mermaidTooltip {\n position: absolute;\n text-align: center;\n max-width: 200px;\n padding: 2px;\n font-family: 'trebuchet ms', verdana, arial;\n font-family: var(--mermaid-font-family);\n font-size: 12px;\n background: ").concat(t.tertiaryColor,";\n border: 1px solid ").concat(t.border2,";\n border-radius: 2px;\n pointer-events: none;\n z-index: 100;\n }\n\n .task-type-0, .section-type-0 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType0):"",";\n }\n .task-type-1, .section-type-1 {\n 
").concat(t.fillType0?"fill: ".concat(t.fillType1):"",";\n }\n .task-type-2, .section-type-2 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType2):"",";\n }\n .task-type-3, .section-type-3 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType3):"",";\n }\n .task-type-4, .section-type-4 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType4):"",";\n }\n .task-type-5, .section-type-5 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType5):"",";\n }\n .task-type-6, .section-type-6 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType6):"",";\n }\n .task-type-7, .section-type-7 {\n ").concat(t.fillType0?"fill: ".concat(t.fillType7):"",";\n }\n")}},Po=function(t,e,n){return" {\n font-family: ".concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n fill: ").concat(n.textColor,"\n }\n\n /* Classes common for multiple diagrams */\n\n .error-icon {\n fill: ").concat(n.errorBkgColor,";\n }\n .error-text {\n fill: ").concat(n.errorTextColor,";\n stroke: ").concat(n.errorTextColor,";\n }\n\n .edge-thickness-normal {\n stroke-width: 2px;\n }\n .edge-thickness-thick {\n stroke-width: 3.5px\n }\n .edge-pattern-solid {\n stroke-dasharray: 0;\n }\n\n .edge-pattern-dashed{\n stroke-dasharray: 3;\n }\n .edge-pattern-dotted {\n stroke-dasharray: 2;\n }\n\n .marker {\n fill: ").concat(n.lineColor,";\n }\n .marker.cross {\n stroke: ").concat(n.lineColor,";\n }\n\n svg {\n font-family: ").concat(n.fontFamily,";\n font-size: ").concat(n.fontSize,";\n }\n\n ").concat(Fo[t](n),"\n\n ").concat(e,"\n\n ").concat(t," { fill: apa;}\n")};function Io(t){return(Io="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(t)}var jo={},Ro=function(t,e,n){switch(f.debug("Directive type=".concat(e.type," with args:"),e.args),e.type){case"init":case"initialize":["config"].forEach((function(t){void 
0!==e.args[t]&&("flowchart-v2"===n&&(n="flowchart"),e.args[n]=e.args[t],delete e.args[t])})),e.args,kt(e.args);break;case"wrap":case"nowrap":t&&t.setWrap&&t.setWrap("wrap"===e.type);break;default:f.warn("Unhandled directive: source: '%%{".concat(e.type,": ").concat(JSON.stringify(e.args?e.args:{}),"}%%"),e)}};function Yo(t){xa(t.git),ve(t.flowchart),Nn(t.flowchart),void 0!==t.sequenceDiagram&&br.setConf(P(t.sequence,t.sequenceDiagram)),br.setConf(t.sequence),ei(t.gantt),ci(t.class),Ri(t.state),Hi(t.state),Aa(t.class),Ra(t.class),Ka(t.er),Oo(t.journey),Da(t.class)}function zo(){}var Uo=Object.freeze({render:function(t,e,n,r){wt();var i=e,a=W.detectInit(i);a&&kt(a);var u=xt();if(e.length>u.maxTextSize&&(i="graph TB;a[Maximum text size in diagram exceeded];style a fill:#faa"),void 0!==r)r.innerHTML="",Object(s.select)(r).append("div").attr("id","d"+t).attr("style","font-family: "+u.fontFamily).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g");else{var l=document.getElementById(t);l&&l.remove();var h=document.querySelector("#d"+t);h&&h.remove(),Object(s.select)("body").append("div").attr("id","d"+t).append("svg").attr("id",t).attr("width","100%").attr("xmlns","http://www.w3.org/2000/svg").append("g")}window.txt=i,i=function(t){var e=t;return e=(e=(e=e.replace(/style.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/classDef.*:\S*#.*;/g,(function(t){return t.substring(0,t.length-1)}))).replace(/#\w+;/g,(function(t){var e=t.substring(1,t.length-1);return/^\+?\d+$/.test(e)?"fl°°"+e+"¶ß":"fl°"+e+"¶ß"}))}(i);var d=Object(s.select)("#d"+t).node(),p=W.detectType(i),g=d.firstChild,y=g.firstChild,v="";if(void 0!==u.themeCSS&&(v+="\n".concat(u.themeCSS)),void 0!==u.fontFamily&&(v+="\n:root { --mermaid-font-family: ".concat(u.fontFamily,"}")),void 0!==u.altFontFamily&&(v+="\n:root { --mermaid-alt-font-family: ".concat(u.altFontFamily,"}")),"flowchart"===p||"flowchart-v2"===p||"graph"===p){var 
m=me(i);for(var b in m)v+="\n.".concat(b," > * { ").concat(m[b].styles.join(" !important; ")," !important; }"),m[b].textStyles&&(v+="\n.".concat(b," tspan { ").concat(m[b].textStyles.join(" !important; ")," !important; }"))}var x=(new o.a)("#".concat(t),Po(p,v,u.themeVariables)),_=document.createElement("style");_.innerHTML=x,g.insertBefore(_,y);try{switch(p){case"git":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,xa(u.git),_a(i,t,!1);break;case"flowchart":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ve(u.flowchart),be(i,t,!1);break;case"flowchart-v2":u.flowchart.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Nn(u.flowchart),Bn(i,t,!1);break;case"sequence":u.sequence.arrowMarkerAbsolute=u.arrowMarkerAbsolute,u.sequenceDiagram?(br.setConf(Object.assign(u.sequence,u.sequenceDiagram)),console.error("`mermaid config.sequenceDiagram` has been renamed to `config.sequence`. Please update your mermaid config.")):br.setConf(u.sequence),br.draw(i,t);break;case"gantt":u.gantt.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ei(u.gantt),ni(i,t);break;case"class":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,ci(u.class),ui(i,t);break;case"classDiagram":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,hi(u.class),fi(i,t);break;case"state":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Ri(u.state),Yi(i,t);break;case"stateDiagram":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Hi(u.state),Gi(i,t);break;case"info":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Aa(u.class),Ma(i,t,c.version);break;case"pie":u.class.arrowMarkerAbsolute=u.arrowMarkerAbsolute,Ra(u.pie),Ya(i,t,c.version);break;case"er":Ka(u.er),to(i,t,c.version);break;case"journey":Oo(u.journey),Do(i,t,c.version)}}catch(e){throw Na(t,c.version),e}Object(s.select)('[id="'.concat(t,'"]')).selectAll("foreignobject > *").attr("xmlns","http://www.w3.org/1999/xhtml");var 
k=Object(s.select)("#d"+t).node().innerHTML;if(f.debug("cnf.arrowMarkerAbsolute",u.arrowMarkerAbsolute),u.arrowMarkerAbsolute&&"false"!==u.arrowMarkerAbsolute||(k=k.replace(/marker-end="url\(.*?#/g,'marker-end="url(#',"g")),k=function(t){var e=t;return e=(e=(e=e.replace(/fl°°/g,(function(){return"&#"}))).replace(/fl°/g,(function(){return"&"}))).replace(/¶ß/g,(function(){return";"}))}(k),void 0!==n)switch(p){case"flowchart":case"flowchart-v2":n(k,qt.bindFunctions);break;case"gantt":n(k,Jr.bindFunctions);break;case"class":case"classDiagram":n(k,on.bindFunctions);break;default:n(k)}else f.debug("CB = undefined!");var w=Object(s.select)("#d"+t).node();return null!==w&&"function"==typeof w.remove&&Object(s.select)("#d"+t).node().remove(),k},parse:function(t){var e=W.detectInit(t);e&&f.debug("reinit ",e);var n,r=W.detectType(t);switch(f.debug("Type "+r),r){case"git":(n=ua.a).parser.yy=sa;break;case"flowchart":case"flowchart-v2":qt.clear(),(n=Zt.a).parser.yy=qt;break;case"sequence":(n=$n.a).parser.yy=ar;break;case"gantt":(n=_r.a).parser.yy=Jr;break;case"class":case"classDiagram":(n=ii.a).parser.yy=on;break;case"state":case"stateDiagram":(n=Mi.a).parser.yy=Si;break;case"info":f.debug("info info info"),(n=Ca.a).parser.yy=Ea;break;case"pie":f.debug("pie"),(n=Ia.a).parser.yy=Fa;break;case"er":f.debug("er"),(n=Ha.a).parser.yy=Wa;break;case"journey":f.debug("Journey"),(n=no.a).parser.yy=lo}return n.parser.yy.graphType=r,n.parser.yy.parseError=function(t,e){throw{str:t,hash:e}},n.parse(t),n},parseDirective:function(t,e,n,r){try{if(void 0!==e)switch(e=e.trim(),n){case"open_directive":jo={};break;case"type_directive":jo.type=e.toLowerCase();break;case"arg_directive":jo.args=JSON.parse(e);break;case"close_directive":Ro(t,jo,r),jo=null}}catch(t){f.error("Error while rendering sequenceDiagram directive: ".concat(e," jison context: 
").concat(n)),f.error(t.message)}},initialize:function(t){t&&t.fontFamily&&(t.themeVariables&&t.themeVariables.fontFamily||(t.themeVariables={fontFamily:t.fontFamily})),ft=P({},t),t&&t.theme&<[t.theme]?t.themeVariables=lt[t.theme].getThemeVariables(t.themeVariables):t&&(t.themeVariables=lt.default.getThemeVariables(t.themeVariables));var e="object"===Io(t)?function(t){return gt=P({},pt),gt=P(gt,t),t.theme&&(gt.themeVariables=lt[t.theme].getThemeVariables(t.themeVariables)),vt=mt(gt,yt),gt}(t):bt();Yo(e),d(e.logLevel)},reinitialize:zo,getConfig:xt,setConfig:function(t){return P(vt,t),xt()},getSiteConfig:bt,updateSiteConfig:function(t){return gt=P(gt,t),mt(gt,yt),gt},reset:function(){wt()},globalReset:function(){wt(),Yo(xt())},defaultConfig:pt});d(xt().logLevel),wt(xt());var $o=Uo,Wo=function(){Vo.startOnLoad?$o.getConfig().startOnLoad&&Vo.init():void 0===Vo.startOnLoad&&(f.debug("In start, no config"),$o.getConfig().startOnLoad&&Vo.init())};"undefined"!=typeof document&& -/*! - * Wait for document loaded before starting the execution - */ -window.addEventListener("load",(function(){Wo()}),!1);var Vo={startOnLoad:!0,htmlLabels:!0,mermaidAPI:$o,parse:$o.parse,render:$o.render,init:function(){var t,e,n,r=this,a=$o.getConfig();arguments.length>=2?( -/*! 
sequence config was passed as #1 */ -void 0!==arguments[0]&&(Vo.sequenceConfig=arguments[0]),t=arguments[1]):t=arguments[0],"function"==typeof arguments[arguments.length-1]?(e=arguments[arguments.length-1],f.debug("Callback function found")):void 0!==a.mermaid&&("function"==typeof a.mermaid.callback?(e=a.mermaid.callback,f.debug("Callback function found")):f.debug("No Callback function found")),t=void 0===t?document.querySelectorAll(".mermaid"):"string"==typeof t?document.querySelectorAll(t):t instanceof window.Node?[t]:t,f.debug("Start On Load before: "+Vo.startOnLoad),void 0!==Vo.startOnLoad&&(f.debug("Start On Load inner: "+Vo.startOnLoad),$o.updateSiteConfig({startOnLoad:Vo.startOnLoad})),void 0!==Vo.ganttConfig&&$o.updateSiteConfig({gantt:Vo.ganttConfig});for(var o=function(a){var o=t[a]; -/*! Check if previously processed */if(o.getAttribute("data-processed"))return"continue";o.setAttribute("data-processed",!0);var s="mermaid-".concat(Date.now());n=i(n=o.innerHTML).trim().replace(//gi,"
");var c=W.detectInit(n);c&&f.debug("Detected early reinit: ",c);try{$o.render(s,n,(function(t,n){o.innerHTML=t,void 0!==e&&e(s),n&&n(o)}),o)}catch(t){f.warn("Syntax Error rendering"),f.warn(t),r.parseError&&r.parseError(t)}},s=0;s - -DESCRIPTION: - validate the relayer configuration -``` - -__Example__ - -Validate the default config file, the path inferred automatically to be -`$HOME/.hermes/config.toml`. - -```shell -hermes config validate -``` - -```text -hermes config validate -Jul 12 16:31:07.017 INFO using default configuration from '$HOME/.hermes/config.toml' -Success: "validation passed successfully" -``` - -Validate a config file at an arbitrary location: - -```shell -hermes -c ./config.toml config validate -``` - -This one fails validation because we mistakenly added two separate sections for -the same chain `ibc-1`: - -```text -hermes -c ./config.toml config validate -error: hermes fatal error: config error: config file has duplicate entry for the chain with id ibc-1 -``` diff --git a/guide/src/commands/global.md b/guide/src/commands/global.md deleted file mode 100644 index 6c302987f0..0000000000 --- a/guide/src/commands/global.md +++ /dev/null @@ -1,113 +0,0 @@ -# Global options - -Hermes accepts global options which affect all commands. - -```shell -hermes 0.15.0 -Informal Systems -Implementation of `hermes`, an IBC Relayer developed in Rust. - -FLAGS: - -c, --config CONFIG path to configuration file - -j, --json enable JSON output -``` - -The flags must be specified right after the `hermes` command and before any subcommand. - -__Example__ - -To start the relayer using the configuration file at `/home/my_chain.toml` and enable JSON output: - -```shell -hermes -c /home/my_chain.toml --json start -``` - -## JSON output - -If the `--json` option is supplied, all relayer commands will output single-line JSON values instead of plain text. 
- -Log messages will be written to `stderr`, while the final result will be written to `stdout`, and everything -will be formatted as JSON. -This allows processing only the final output using [`jq`](https://stedolan.github.io/jq/). -To process all the output using `jq`, one can redirect `stderr` to `stdout` with `hermes --json COMMAND 2>&1 | jq`. - -__Example__ - -```shell -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 -``` - -```json -{"timestamp":"Apr 13 20:46:31.921","level":"INFO","fields":{"message":"Using default configuration from: '.hermes/config.toml'"},"target":"ibc_relayer_cli::commands"} -{"timestamp":"Apr 13 20:46:31.961","level":"INFO","fields":{"message":"running listener","chain.id":"ibc-1"},"target":"ibc_relayer::event::monitor"} -{"timestamp":"Apr 13 20:46:31.989","level":"INFO","fields":{"message":"running listener","chain.id":"ibc-0"},"target":"ibc_relayer::event::monitor"} -{"result":{"CreateClient":{"client_id":"07-tendermint-1","client_type":"Tendermint","consensus_height":{"revision_height":10060,"revision_number":1},"height":{"revision_height":10072,"revision_number":0}}},"status":"success"} -``` - -The first three lines are printed to `stderr`, while the last line with a `"result"` key is printed to `stdout`. 
- -__Example__ - -To improve the readability, pipe all of the output to `jq`: - -``` -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 2>&1 | jq -``` - -```json -{ - "timestamp": "Apr 13 20:52:26.060", - "level": "INFO", - "fields": { - "message": "Using default configuration from: '.hermes/config.toml'" - }, - "target": "ibc_relayer_cli::commands" -} -{ - "timestamp": "Apr 13 20:52:26.082", - "level": "INFO", - "fields": { - "message": "running listener", - "chain.id": "ibc-1" - }, - "target": "ibc_relayer::event::monitor" -} -{ - "timestamp": "Apr 13 20:52:26.088", - "level": "INFO", - "fields": { - "message": "running listener", - "chain.id": "ibc-0" - }, - "target": "ibc_relayer::event::monitor" -} -{ - "result": { - "CreateClient": { - "client_id": "07-tendermint-5", - "client_type": "Tendermint", - "consensus_height": { - "revision_height": 10364, - "revision_number": 1 - }, - "height": { - "revision_height": 10375, - "revision_number": 0 - } - } - }, - "status": "success" -} -``` - -__Example__ - -To extract the identifer of the newly created client above: - -``` -hermes -c /home/my_chain.toml --json create client ibc-0 ibc-1 | jq '.result.CreateClient.client_id' -``` - -``` -"07-tendermint-2" -``` diff --git a/guide/src/commands/index.md b/guide/src/commands/index.md deleted file mode 100644 index 9f7ede994e..0000000000 --- a/guide/src/commands/index.md +++ /dev/null @@ -1,41 +0,0 @@ -# Commands - -The `Commands` section presents the commands current available in Hermes - -## Sections - -**[Keys](./keys/index.md)** - -Commands to manage keys (private keys) for each chain. - -**[Config](./config.md)** - -Commands to manage configuration file, in particular to validate it. - -**[Path Setup](./path-setup/index.md)** - -Commands to manage clients, connections, channels. - -**[Relaying](./relaying/index.md)** - -Commands to start the relayer and relay packets. 
- -**[Listen Mode](./listen/index.md)** - -Commands to listen for IBC events - -**[Upgrade](./upgrade/index.md)** - -Commands to perform client upgrade - -**[Monitor](./misbehaviour/index.md)** - -Commands to monitor clients and submit evidence of misbehaviour - -**[Queries](./queries/index.md)** - -Commands to execute queries on configured chains - -**[Raw Transactions](./raw/index.md)** - -Commands to submit individual transactions to configured chains diff --git a/guide/src/commands/keys/index.md b/guide/src/commands/keys/index.md deleted file mode 100644 index e246d0ef4a..0000000000 --- a/guide/src/commands/keys/index.md +++ /dev/null @@ -1,344 +0,0 @@ -# Adding Keys to the Relayer - -> __WARNING__: Currently the relayer does NOT support a `keyring` store to securely -> store the private key file. The key file will be stored on the local file system -> in the user __$HOME__ folder under `$HOME/.hermes/keys/` - -> __BREAKING__: As of Hermes v1.0.0, the sub-command `keys restore` has been removed. -> Please use the sub-command `keys add` in order to restore a key. - ---- - -Using the `keys` command you can add and list keys. - -#### Show usage - -To see the available sub-commands for the `keys` command run: - -```shell -hermes help keys -``` - -The available sub-commands are the following: - -```shell -USAGE: - hermes keys - -DESCRIPTION: - Manage keys in the relayer for each chain - -SUBCOMMANDS: - help Get usage information - add Adds key to a configured chain or restores a key to a configured chain - using a mnemonic - balance Query balance for a key from a configured chain. If no key is given, the - key is retrieved from the configuration file - delete Delete key(s) from a configured chain - list List keys configured on a chain -``` - -### Key Seed file (Private Key) - -In order to execute the command below you need a private key file (JSON). The relayer uses the private key file to sign the transactions submitted to the chain. 
- -The private key file can be obtained by using the `keys add` on a Cosmos chain. For example, the command for `gaiad` is: - -```shell -# The `key_name` parameter is the name of the key that will be found in the json output -# For example, in the "Two Local Chains" tutorial, we use "testkey". -gaiad keys add --output json -``` - -The command outputs a JSON similar to the one below. - -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -You can save this to a file (e.g. `key_seed.json`) and use it to add to the relayer with `hermes keys add -f key_seed.json`. See the `Adding Keys` section for more details. - -### Adding and restoring Keys - -The command `keys add` has two exclusive flags, `--key-file` and `--mnemonic-file` which are respectively used to add and restore a key. - -```shell - hermes keys add [OPTIONS] --key-file --mnemonic-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -f, --key-file - path to the key file - - -m, --mnemonic-file - path to file containing mnemonic to restore the key from - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] -``` - -#### Add a private key to a chain from a key file - -```shell - hermes keys add [OPTIONS] --key-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -f, --key-file - path to the key file - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] 
-``` - -To add a private key file to a chain: - -```shell -hermes -c config.toml keys add [CHAIN_ID] -f [PRIVATE_KEY_FILE] -``` - -The content of the file key should have the same format as the output of the `gaiad keys add` command: - -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -If the command is successful a message similar to the one below will be displayed: - -```json -Success: Added key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -> **Key name:** -> By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--key-name` option when invoking `keys add`. -> -> ``` -> hermes -c config.toml keys add [CHAINID] -f [PRIVATE_KEY_FILE] -k [KEY_NAME] -> ``` - -#### Restore a private key to a chain from a mnemonic - -```shell - hermes keys add [OPTIONS] --mnemonic-file - -DESCRIPTION: - Adds key to a configured chain or restores a key to a configured chain using a mnemonic - -ARGS: - chain_id identifier of the chain - -FLAGS: - -m, --mnemonic-file - path to file containing mnemonic to restore the key from - -OPTIONS: - -k, --key-name - name of the key (defaults to the `key_name` defined in the config) - - -p, --hd-path - derivation path for this key [default: m/44'/118'/0'/0/0] -``` - -To restore a key from its mnemonic: - -```shell -hermes -c config.toml keys add [CHAIN_ID] -m "[MNEMONIC_FILE]" -``` - -or using an explicit [derivation path](https://github.com/satoshilabs/slips/blob/master/slip-0044.md), for example -an Ethereum coin type (used for Evmos, Injective, Umee, Cronos, and -possibly other networks): - -```shell -hermes -c config.toml keys add --mnemonic-file --hd-path "m/44'/60'/0'/0/0" -``` - -The mnemonic file needs to have the 24 mnemonic words on the same line, 
separated by a white space. So the content should have the following format: -``` -word1 word2 word3 ... word24 -``` - -If the command is successful a message similar to the one below will be displayed: - -```json -Success: Restore key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -> **Key name:** -> By default, the key will be named after the `key_name` property specified in the configuration file. -> To use a different key name, specify the `--key-name` option when invoking `keys add`. -> -> ``` -> hermes -c config.toml keys add [CHAINID] -m "[MNEMONIC_FILE]" -k [KEY_NAME] -> ``` - -### Delete keys - -In order to delete the private keys added to chains use the `keys delete` command - -```shell -USAGE: - hermes keys delete - -DESCRIPTION: - Delete key(s) from a configured chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain - -FLAGS: - -n, --name NAME name of the key - -a, --all delete all keys -``` - -#### Delete private keys that was previously added to a chain - -To delete a single private key by name: - -```shell -hermes -c config.toml keys delete [CHAIN_ID] -n [KEY_NAME] -``` - -Alternatively, to delete all private keys added to a chain: - -```shell -hermes -c config.toml keys delete [CHAIN_ID] -a -``` - -### List keys - -In order to list the private keys added to chains use the `keys list` command - -```shell -USAGE: - hermes keys list - -DESCRIPTION: - List keys configured on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain -``` - -#### Listing the private key that was added to a chain - -To list the private key file that was added to a chain: - -```shell -hermes -c config.toml keys list [CHAIN_ID] -``` - -If the command is successful a message similar to the one below will be displayed: - -``` -Success: -- user2 (cosmos1attn9fxrcvjz483w3tu4cfz77ldmlyujly3q3k) -- testkey (cosmos1dw88vdekeeuta5u50p6n5lt5v5c6y2we0pu8nz) -``` - -**JSON:** - -```shell -hermes --json -c config.toml keys list [CHAIN_ID] | jq -``` - -If the command 
is successful a message similar to the one below will be displayed: - -```json -{ - "result": { - "testkey": { - "account": "cosmos1dw88vdekeeuta5u50p6n5lt5v5c6y2we0pu8nz", - "address": [ 107, 142, 118, 55, 54, 206, 120, 190, 211, 148, 120, 117, 58, 125, 116, 101, 49, 162, 41, 217 ], - "coin_type": 118, - "private_key": "(snip)", - "public_key": "xpub6Gc7ZUt2q1BiQYjhUextPv5bZLwosHigZYqEquPD6FkAGmHDrLiBgE5Xnh8XGZp79rAXtZn1Dt3DNQHxxgCgVQqfRMfVsRiXn6mwULBnYq7" - }, - "user2": { - "account": "cosmos1attn9fxrcvjz483w3tu4cfz77ldmlyujly3q3k", - "address": [ 234, 215, 50, 164, 195, 195, 36, 42, 158, 46, 138, 249, 92, 36, 94, 247, 219, 191, 147, 146 ], - "coin_type": 118, - "private_key": "(snip)", - "public_key": "xpub6FmDbeGTWVjSvHrqHfrpnMTZxpPX1V7XFiq5nMuvgwX9jumt1yUuwNAUQo8Nn36unbFShg6iSjkfMBgeY49wik7rF91N2SHvarpX62ByWMf" - } - }, - "status": "success" -} -``` -### Query balance - -In order to retrieve the balance of an account associated with a key use the `keys balance` command - -```shell -USAGE: - hermes keys balance [OPTIONS] - -DESCRIPTION: - Query balance for a key from a configured chain. 
If no key is given, the key is retrieved from the configuration file - -ARGS: - chain_id identifier of the chain - -OPTIONS: - -k, --key-name (optional) name of the key (defaults to the `key_name` defined in the config) -``` - -If the command is successful a message with the following format will be displayed: - -``` -Success: balance for key `KEY_NAME`: 100000000000 stake -``` - -**JSON:** - -```shell - hermes --json keys balance [OPTIONS] -``` -or - -```shell - hermes -j keys balance [OPTIONS] -``` - -If the command is successful a message with the following format will be displayed: - -```json -{ - "result": { - "amount": "99989207", - "denom": "stake" - }, - "status": "success" -} -``` \ No newline at end of file diff --git a/guide/src/commands/listen/index.md b/guide/src/commands/listen/index.md deleted file mode 100644 index dd4fc8472d..0000000000 --- a/guide/src/commands/listen/index.md +++ /dev/null @@ -1,162 +0,0 @@ -# Relayer Listen Mode - -The relayer can be started in `listen` mode to display the events emitted by a given chain. `NewBlock` and `Tx` IBC events are shown. - -```shell -USAGE: - hermes listen - -DESCRIPTION: - Listen to and display IBC events emitted by a chain - -POSITIONAL ARGUMENTS: - chain_id Identifier of the chain to listen for events from - -FLAGS: - -e, --event EVENT Add an event type to listen for, can be repeated. 
Listen for all events by default (available: Tx, NewBlock) -``` - -__Example__ - -Start the relayer in listen mode for all `ibc-0` events and observe the output: - -```shell -hermes listen ibc-0 -``` - -```json -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10914), - events: [ - NewBlock( - NewBlock { - height: block::Height(10914), - }, - ), - ], -} -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10915), - events: [ - OpenInitConnection( - OpenInit( - Attributes { - height: block::Height(10915), - connection_id: Some( - ConnectionId( - "connection-3", - ), - ), - client_id: ClientId( - "07-tendermint-3", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-5", - ), - }, - ), - ), - ], - -... - -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10919), - events: [ - UpdateClient( - UpdateClient( - Attributes { - height: block::Height(10919), - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 10907, - }, - }, - ), - ), - ], -} - -... 
- -EventBatch { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - height: block::Height(10924), - events: [ - UpdateClient( - UpdateClient( - Attributes { - height: block::Height(10924), - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 10912, - }, - }, - ), - ), - OpenAckConnection( - OpenAck( - Attributes { - height: block::Height(10924), - connection_id: Some( - ConnectionId( - "connection-3", - ), - ), - client_id: ClientId( - "07-tendermint-3", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-5", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-5", - ), - }, - ), - ), - ], -} -``` - -## Filter events - -The `listen` command accepts an `--event` flag to specify which event types to listen for. - -At the moment, two event types are available: -- `NewBlock` -- `Tx` - -The `--event` flag can be repeated to specify more than one event type. - -- To listen for only `NewBlock` events on `ibc-0`, invoke `hermes listen ibc-0 --event NewBlock` -- To listen for only `Tx` events on `ibc-0`, invoke `hermes listen ibc-0 --event Tx` -- To listen for both `NewBlock` and `Tx` events on `ibc-0`, invoke `hermes listen ibc-0 --e NewBlock --event Tx` - -If the `--event` flag is omitted, the relayer will subscribe to all event types. diff --git a/guide/src/commands/misbehaviour/index.md b/guide/src/commands/misbehaviour/index.md deleted file mode 100644 index 1d7f48cbad..0000000000 --- a/guide/src/commands/misbehaviour/index.md +++ /dev/null @@ -1,127 +0,0 @@ -# Misbehaviour - -## Table of Contents - - -## Monitoring Misbehaviour and Evidence Submission -Use the `mishbehaviour` command to monitor the updates for a given client, detect certain types of misbehaviour and -submit evidence to the chain. If the evidence passes the on-chain validation, the client is frozen. Further packets -cannot be relayed using the frozen client. 
- -```shell -USAGE: - hermes misbehaviour - -DESCRIPTION: - Listen to client update IBC events and handles misbehaviour - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain where client updates are monitored for misbehaviour - client_id identifier of the client to be monitored for misbehaviour -``` - -The misbehaviour monitor starts by analyzing all headers used in prior client updates. -Once finished it registers for update client events and checks any new headers for misbehaviour. -If it detects evidence of misbehaviour, it submits a transaction with the evidence to the chain. -If the chain validates the transaction then the monitor exits. - -> This is an experimental feature. - -The following types of misbehaviour are handled: -1. **Fork** - - Assumes at least one consensus state before the fork point exists. - Let existing consensus states on chain B be: `[Sn,.., Sf, Sf-1, S0]` with `Sf-1` being - the most recent state before the fork. - Chain A is queried for a header `Hf'` at `Sf.height` and if it is different than the `Hf` - in the event for the client update (the one that has generated `Sf` on chain), then the two - headers are included in the evidence and submitted. - Note that in this case the headers are different but have the same height. - -2. **BFT time violation for an unavailable header** - - Some header with a height that is higher than the latest - height on chain `A` has been accepted and a consensus state was created on `B`. Note that this implies - that the timestamp of this header must be within the `clock_drift` of the client. - Assume the client on `B` has been updated with `h2`(not present on/ produced by chain `A`) - and it has a timestamp of `t2` that is at most `clock_drift` in the future. - Then the latest header from `A` is fetched, let it be `h1`, with a timestamp of `t1`. - If `t1 >= t2` then evidence of misbehavior is submitted to A. 
- -__Example__ - -The `hermes misbehaviour` outputs an error message displaying `MISBEHAVIOUR DETECTED`: - -```shell -hermes misbehaviour ibc-0 07-tendermint-0 -``` - -```json -Apr 13 20:04:03.347 INFO ibc_relayer::foreign_client: checking misbehaviour for consensus state heights [Height { revision: 1, height: 195 }, Height { revision: 1, height: 85 }, Height { revision: 1, height: 28 }] -Apr 13 20:04:04.425 ERROR ibc_relayer::foreign_client: MISBEHAVIOUR DETECTED ClientId("07-tendermint-0") h1: Height { revision: 1, height: 195 }-Height { revision: 1, height: 85 } h2: Height { revision: 1, height: 195 }-Height { revision: 1, height: 85 }, sending evidence -Apr 13 20:04:05.070 INFO ibc_relayer_cli::commands::misbehaviour: evidence submission result [ClientMisbehaviour(ClientMisbehaviour(Attributes { height: Height { revision: 0, height: 1521 }, client_id: ClientId("07-tendermint-0"), client_type: Tendermint, consensus_height: Height { revision: 1, height: 195 } }))] - -Success: Some( - ClientMisbehaviour( - ClientMisbehaviour( - Attributes { - height: Height { - revision: 0, - height: 1521, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 195, - }, - }, - ), - ), -) -``` - -Querying client state from this point will show the client is in frozen state, with `frozen_height` indicating the height at which the client was frozen: -```shell -hermes query client state ibc-0 07-tendermint-0 | jq -``` -```json -{ - "result": { - "allow_update_after_expiry": true, - "allow_update_after_misbehaviour": true, - "chain_id": "ibc-1", - "frozen_height": { - "revision_height": 16, - "revision_number": 1 - }, - "latest_height": { - "revision_height": 16, - "revision_number": 1 - }, - "max_clock_drift": { - "nanos": 0, - "secs": 3 - }, - "trust_level": { - "denominator": "3", - "numerator": "1" - }, - "trusting_period": { - "nanos": 0, - "secs": 1209600 - }, - "unbonding_period": { - "nanos": 0, - 
"secs": 1814400 - }, - "upgrade_path": [ - "upgrade", - "upgradedIBCState" - ] - }, - "status": "success" -} -``` diff --git a/guide/src/commands/path-setup/channels.md b/guide/src/commands/path-setup/channels.md deleted file mode 100644 index ca47b1c116..0000000000 --- a/guide/src/commands/path-setup/channels.md +++ /dev/null @@ -1,486 +0,0 @@ -# Channel - -## Table of Contents - - - -## Establish Channel - -Use the `create channel` command to establish a new channel. - -```shell -USAGE: - hermes create channel [OPTIONS] --port-a --port-b [CONNECTION_A] - -DESCRIPTION: - Create a new channel between two chains using a pre-existing connection. - Alternatively, create a new client and a new connection underlying the new channel if a pre-existing connection is not provided. - -POSITIONAL ARGUMENTS: - Identifier of the side `a` chain for the new channel - Identifier of the connection on chain `a` to use in creating the new channel - -FLAGS: - -c, --chain-b Identifier of the side `b` chain for the new channel - -h, --help Print help information - --new-client-connection Indicates that a new client and connection will be created underlying the new channel - -o, --order The channel ordering, valid options 'unordered' (default) and 'ordered' [default: ORDER_UNORDERED] - --port-a Identifier of the side `a` port for the new channel - --port-b Identifier of the side `b` port for the new channel - -v, --channel-version The version for the new channel -``` - -## Examples - -### New channel over an existing connection - -This is the preferred way to create a new channel, by leveraging an existing -connection. 
- -Create a new unordered channel between `ibc-0` and `ibc-1` over an existing connection, -specifically the one we just created in the example above, with port name -`transfer` on both sides: - -```shell -hermes create channel ibc-0 --connection-a connection-0 --port-a transfer --port-b transfer -o unordered -``` - -Notice that one can omit the destination chain parameter, as Hermes will automatically -figure it out by looking up the given connection on `ibc-0`. - -```json -🥳 ibc-0 => OpenInitChannel( - OpenInit( - Attributes { - height: Height { revision: 0, height: 129 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: None - } - ) -) -🥳 ibc-1 => OpenTryChannel( - OpenTry( - Attributes { - height: Height { revision: 1, height: 126 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 ibc-0 => OpenAckChannel( - OpenAck( - Attributes { - height: Height { revision: 0, height: 137 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 ibc-1 => OpenConfirmChannel( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 129 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-1")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-1")) - } - ) -) -🥳 🥳 🥳 Channel handshake finished for Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: 
Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, -} -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, -} -``` - -### New channel over a new connection - -Should you specifically want to create a new client and a new connection as part -of the `create channel` flow, that option exists, though this is the -less-preferred option over the previous flow, as creating new clients and -connections should only be done in certain specific circumstances so as not to -create redundant resources. 
- -Create a new unordered channel between `ibc-0` and `ibc-1` over a new -connection, using port name `transfer` on both sides and accepting the -interactive prompt that pops up notifying you that a new client and a new -connection will be initialized as part of the process: - -```shell -hermes create channel ibc-0 --chain-b ibc-1 --port-a transfer --port-b transfer -o unordered --new-client-connection -``` - -```json -🥂 ibc-0 => OpenInitConnection( - OpenInit( - Attributes { - height: Height { revision: 0, height: 66 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-1 => OpenTryConnection( - OpenTry( - Attributes { - height: Height { revision: 1, height: 64 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-0 => OpenAckConnection( - OpenAck( - Attributes { - height: Height { revision: 0, height: 76 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂 ibc-1 => OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 68 }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) - -🥂🥂🥂 Connection handshake finished for [Connection { - delay_period: 0s, - a_side: ConnectionSide { - 
chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - }, -}] - -🥳 ibc-0 => OpenInitChannel( - OpenInit( - Attributes { - height: Height { revision: 0, height: 78 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: None - } - ) -) - -🥳 ibc-1 => OpenTryChannel( - OpenTry( - Attributes { - height: Height { revision: 1, height: 70 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 ibc-0 => OpenAckChannel( - OpenAck( - Attributes { - height: Height { revision: 0, height: 81 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 ibc-1 => OpenConfirmChannel - OpenConfirm - Attributes { - height: Height { revision: 1, height: 73 }, - port_id: PortId("transfer"), - channel_id: Some(ChannelId("channel-0")), - connection_id: ConnectionId("connection-0"), - counterparty_port_id: PortId("transfer"), - counterparty_channel_id: Some(ChannelId("channel-0")) - } - ) -) - -🥳 🥳 🥳 Channel handshake finished for Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - 
runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, -} - -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, -} -``` - -A new channel with identifier `channel-0` on both sides has been established on -a new connection with identifier `connection-0` on both sides. diff --git a/guide/src/commands/path-setup/clients.md b/guide/src/commands/path-setup/clients.md deleted file mode 100644 index bfecbc7913..0000000000 --- a/guide/src/commands/path-setup/clients.md +++ /dev/null @@ -1,152 +0,0 @@ -# Client - -## Table of Contents - - - -## Create Client - -Use the `create client` command to create a new client on a destination chain, -tracking the state of the source chain. 
- -```shell -USAGE: - hermes create client [OPTIONS] - -ARGS: - - identifier of the destination chain - - - identifier of the source chain - -OPTIONS: - -d, --clock-drift - The maximum allowed clock drift for this client. - - The clock drift is a correction parameter. It helps deal with clocks that are only - approximately synchronized between the source and destination chains of this client. The - destination chain for this client uses the clock drift parameter when deciding to accept - or reject a new header (originating from the source chain) for this client. If this - option is not specified, a suitable clock drift value is derived from the chain - configurations. - - -p, --trusting-period - Override the trusting period specified in the config. - - The trusting period specifies how long a validator set is trusted for (must be shorter - than the chain's unbonding period). - - -t, --trust-threshold - Override the trust threshold specified in the configuration. - - The trust threshold defines what fraction of the total voting power of a known and - trusted validator set is sufficient for a commit to be accepted going forward. -``` - -__Example__ - -Create a new client on `ibc-0` which tracks `ibc-1`: - -```shell -hermes create client ibc-0 ibc-1 -``` - -```json - CreateClient( - Attributes { - height: Height { - revision: 0, - height: 286, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 274, - }, - }, - ), -) -``` - -A new client is created with identifier `07-tendermint-0` - -## Update Client - -Use the `update client` command to update an existing client with a new consensus state. -Specific update and trusted heights can be specified. 
- -```shell -USAGE: - hermes update client [OPTIONS] - -ARGS: - identifier of the destination chain - identifier of the client to be updated on destination chain - -OPTIONS: - -h, --help Print help information - -H, --target-height the target height of the client update - -t, --trusted-height the trusted height of the client update -``` - -__Update client with latest header__ - -Update the client on `ibc-0` with latest header of `ibc-1`: - -```shell -hermes update client ibc-0 07-tendermint-1 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 303 }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 293 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-1` has been updated with the consensus state at height `1-293`. - -__Update a client to a specific target height__ - -```shell -hermes update client ibc-0 07-tendermint-1 --target-height 320 --trusted-height 293 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 555 }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 320 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-1` has been updated with the consensus state at height `1-320`, as specified. diff --git a/guide/src/commands/path-setup/connections.md b/guide/src/commands/path-setup/connections.md deleted file mode 100644 index 2c8ec5a735..0000000000 --- a/guide/src/commands/path-setup/connections.md +++ /dev/null @@ -1,215 +0,0 @@ -# Connection - -## Table of Contents - - - -## Establish Connection -Use the `create connection` command to create a new connection. 
- -```shell -USAGE: - hermes create connection - -DESCRIPTION: - Create a new connection between two chains - -POSITIONAL ARGUMENTS: - chain_a_id identifier of the side `a` chain for the new connection - chain_b_id identifier of the side `b` chain for the new connection - -FLAGS: - --client-a CLIENT-A identifier of client hosted on chain `a`; default: None (creates a new client) - --client-b CLIENT-B identifier of client hosted on chain `b`; default: None (creates a new client) - --delay DELAY delay period parameter for the new connection (seconds) (default: 0) -``` - -## Examples - -### New connection over new clients - -Create a new connection between `ibc-0` and `ibc-1` over new clients: - -```shell -hermes create connection ibc-0 ibc-1 -``` - -```json -🥂 ibc-0 => OpenInitConnection( - OpenInit( - Attributes { - height: Height { revision: 0, height: 4073 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-1 => OpenTryConnection( - OpenTry( - Attributes { - height: Height { revision: 1, height: 4069 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-0 => OpenAckConnection( - OpenAck( - Attributes { - height: Height { revision: 0, height: 4081 }, - connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂 ibc-1 => OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { revision: 1, height: 4073 }, - connection_id: Some( - 
ConnectionId( - "connection-8", - ), - ), - client_id: ClientId( - "07-tendermint-8", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-8", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-8", - ), - }, - ), -) - -🥂🥂🥂 Connection handshake finished for [Connection { - delay_period: 0s, - a_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, -}] - -Success: Connection { - delay_period: 0s, - a_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, - b_side: ConnectionSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-8", - ), - connection_id: ConnectionId( - "connection-8", - ), - }, -} -``` - -### New connection over existing clients - -Create a new connection between `ibc-0` and `ibc-1` over existing clients, -both with client id `07-tendermint-0`: - -```shell -hermes create connection ibc-0 --client-a 07-tendermint-0 --client-b -07-tendermint-0 -``` - - -Notice that one can omit the destination chain parameter, as Hermes will automatically -figure it out by looking up the given client on `ibc-0`. - -## Non-zero Delay Connection - -A connection can be created with a delay period parameter. 
This parameter specifies a period of time that must elapse after a successful client state update and before a packet with proofs using its commitment root can be processed on chain. For more information see [how packet delay works](../relaying/index.md#packet-delay) and the [connection delay specification](https://github.com/cosmos/ibc/tree/master/spec/core/ics-003-connection-semantics). diff --git a/guide/src/commands/path-setup/index.md b/guide/src/commands/path-setup/index.md deleted file mode 100644 index ae654ca77a..0000000000 --- a/guide/src/commands/path-setup/index.md +++ /dev/null @@ -1,43 +0,0 @@ -# Path Setup - -This section describes a number of commands that can be used to manage clients, connections, channels. - -| CLI name | Description | -| ---------------------- | --------------------------------------------------------------------------------------------------------------- | -| `create client` | [Create a client for source chain on destination chain](./clients.md#create-client) | -| `update client` | [Update the specified client on destination chain](./clients.md#update-client) | -| `create connection` | [Establish a connection using existing or new clients](./connections.md#establish-connection) | -| `create channel` | [Establish a channel using a pre-existing connection, or alternatively create a new client and a new connection underlying the new channel](./channels.md#establish-channel) | - - -## Create -Use the `create` commands to create new clients, connections, and channels. - -```shell -USAGE: - hermes create - -DESCRIPTION: - Create objects (client, connection, or channel) on chains - -SUBCOMMANDS: - help Get usage information - client Create a new IBC client - connection Create a new connection between two chains - channel Create a new channel between two chains -``` - -## Update -Use the `update` commands to update a client. 
- -```shell -USAGE: - hermes update - -DESCRIPTION: - Update objects (clients) on chains - -SUBCOMMANDS: - help Get usage information - client Update an IBC client -``` diff --git a/guide/src/commands/queries/channel.md b/guide/src/commands/queries/channel.md deleted file mode 100644 index fed5deb4ea..0000000000 --- a/guide/src/commands/queries/channel.md +++ /dev/null @@ -1,413 +0,0 @@ -# Table of Contents - - - -# Query Channels - -Use the `query channels` command to query the identifiers of all channels on a given chain. - -```shell -USAGE: - hermes query channels - -DESCRIPTION: - Query the identifiers of all channels on a given chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query -``` - -__Example__ - -Query all channels on `ibc-1`: - -```shell -hermes query channels ibc-1 -``` - -```json -Success: [ - PortChannelId { - channel_id: ChannelId( - "channel-0", - ), - port_id: PortId( - "transfer", - ), - }, - PortChannelId { - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - }, -] -``` - -# Query Channel Data - -Use the `query channel` commands to query the information about a specific channel. 
- -```shell -USAGE: - hermes query channel - -DESCRIPTION: - Query information about channels - -SUBCOMMANDS: - client Query channel's client state - end Query channel end - ends Query channel ends and underlying connection and client objects -``` - -## Query the channel end data - -Use the `query channel end` command to query the channel end: - -```shell -USAGE: - hermes query channel end - -DESCRIPTION: - Query channel end - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query the channel end of channel `channel-1` on port `transfer` on `ibc-1`: - -```shell -hermes query channel end ibc-1 transfer channel-1 -``` - -```json -Success: ChannelEnd { - state: Open, - ordering: Unordered, - remote: Counterparty { - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - connection_hops: [ - ConnectionId( - "connection-1", - ), - ], - version: "ics20-1", -} -``` - -## Query the channel data for both ends of a channel - - -Use the `query channel ends` command to obtain both ends of a channel: - -```shell -USAGE: - hermes query channel ends - -DESCRIPTION: - Query channel ends and underlying connection and client objects - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - -FLAGS: - -H, --height HEIGHT height of the state to query - -v, --verbose enable verbose output, displaying all details of channels, connections & clients -``` - -__Example__ - -Query the channel end of channel `channel-1` on port `transfer` on `ibc-0`: - -```shell -hermes query channel ends ibc-0 transfer channel-1 -``` - -```json -Success: ChannelEndsSummary { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - client_id: ClientId( - "07-tendermint-1", - 
), - connection_id: ConnectionId( - "connection-1", - ), - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - counterparty_chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: ConnectionId( - "connection-1", - ), - counterparty_channel_id: ChannelId( - "channel-1", - ), - counterparty_port_id: PortId( - "transfer", - ), -} -``` - -Passing the `-v` flag will additionally print all the details of the -channel, connection, and client on both ends. - -## Query the channel client state - -Use the `query channel client` command to obtain the channel's client state: - -```shell -USAGE: - hermes query channel client --port-id --channel-id - -DESCRIPTION: - Query channel's client state - -ARGS: - identifier of the chain to query - -FLAGS: - --channel-id identifier of the channel to query - --port-id identifier of the port to query -``` - -If the command is successful a message with the following format will be displayed: -``` -Success: Some( - IdentifiedAnyClientState { - client_id: ClientId( - "07-tendermint-0", - ), - client_state: Tendermint( - ClientState { - chain_id: ChainId { - id: "network2", - version: 0, - }, - trust_level: TrustThreshold { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 40s, - latest_height: Height { - revision: 0, - height: 2775, - }, - proof_specs: ProofSpecs( - [ - ProofSpec( - ProofSpec { - leaf_spec: Some( - LeafOp { - hash: Sha256, - prehash_key: NoHash, - prehash_value: Sha256, - length: VarProto, - prefix: [ - 0, - ], - }, - ), - inner_spec: Some( - InnerSpec { - child_order: [ - 0, - 1, - ], - child_size: 33, - min_prefix_length: 4, - max_prefix_length: 12, - empty_child: [], - hash: Sha256, - }, - ), - max_depth: 0, - min_depth: 0, - }, - ), - ProofSpec( - ProofSpec { - leaf_spec: Some( - LeafOp { - hash: Sha256, - prehash_key: NoHash, - 
prehash_value: Sha256, - length: VarProto, - prefix: [ - 0, - ], - }, - ), - inner_spec: Some( - InnerSpec { - child_order: [ - 0, - 1, - ], - child_size: 32, - min_prefix_length: 1, - max_prefix_length: 1, - empty_child: [], - hash: Sha256, - }, - ), - max_depth: 0, - min_depth: 0, - }, - ), - ], - ), - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update: AllowUpdate { - after_expiry: true, - after_misbehaviour: true, - }, - frozen_height: None, - }, - ), - }, -) -``` - -**JSON:** - -```shell - hermes --json query channel client --port-id --channel-id -``` -or - -```shell - hermes -j query channel client --port-id --channel-id -``` - -If the command is successful a message with the following format will be displayed: - -```json -{ - "result": - { - "client_id":"07-tendermint-0", - "client_state": - { - "allow_update": - { - "after_expiry":true, - "after_misbehaviour":true - }, - "chain_id":"network2", - "frozen_height":null, - "latest_height": - { - "revision_height":2775, - "revision_number":0 - }, - "max_clock_drift": - { - "nanos":0, - "secs":40 - }, - "proof_specs": - [ - { - "inner_spec": - { - "child_order":[0,1], - "child_size":33, - "empty_child":"", - "hash":1, - "max_prefix_length":12, - "min_prefix_length":4 - }, - "leaf_spec": - { - "hash":1, - "length":1, - "prefix":"AA==", - "prehash_key":0, - "prehash_value":1 - }, - "max_depth":0, - "min_depth":0 - }, - { - "inner_spec": - { - "child_order":[0,1], - "child_size":32, - "empty_child":"", - "hash":1, - "max_prefix_length":1, - "min_prefix_length":1 - }, - "leaf_spec": - { - "hash":1, - "length":1, - "prefix":"AA==", - "prehash_key":0, - "prehash_value":1 - }, - "max_depth":0, - "min_depth":0 - } - ], - "trust_level": - { - "denominator":3, - "numerator":1 - }, - "trusting_period": - { - "nanos":0, - "secs":1209600 - }, - "type":"Tendermint", - "unbonding_period": - { - "nanos":0, - "secs":1814400 - }, - "upgrade_path":["upgrade","upgradedIBCState"] - }, - 
"type":"IdentifiedAnyClientState" - }, - "status":"success" -} -``` \ No newline at end of file diff --git a/guide/src/commands/queries/client.md b/guide/src/commands/queries/client.md deleted file mode 100644 index 1603f8332e..0000000000 --- a/guide/src/commands/queries/client.md +++ /dev/null @@ -1,306 +0,0 @@ - -# Table of Contents - - - -# Query Clients - -Use the `query clients` command to query the identifiers of all clients on a given chain. - -```shell -USAGE: - hermes query clients - -DESCRIPTION: - Query the identifiers of all clients on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - -FLAGS: - -s, --src-chain-id ID filter for clients which target a specific chain id (implies '-o') - -o, --omit-chain-ids omit printing the source chain for each client (default: false) -``` - -__Example__ - -Query all clients on `ibc-1`: - -```shell -hermes query clients ibc-1 -``` - -```json -Success: [ - ClientChain { - client_id: ClientId( - "07-tendermint-0", - ), - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - }, - ClientChain { - client_id: ClientId( - "07-tendermint-1", - ), - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - }, -] -``` - -Query all clients on `ibc-1` having `ibc-2` as their source chain: - -```shell -hermes query clients ibc-1 -s ibc-2 -``` - -```json -Success: [ - ClientId( - "07-tendermint-1", - ), -] -``` - -# Query Client Data - -Use the `query client` command to query the information about a specific client. 
- -```shell -USAGE: - hermes query client - -DESCRIPTION: - Query information about clients - -SUBCOMMANDS: - state query client full state - consensus query client consensus - connections query client connections -``` - -## Query the client state - -Use the `query client state` command to query the client state of a client: - -```shell -USAGE: - hermes query client state - -DESCRIPTION: - Query client full state - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -H, --height HEIGHT the chain height which this query should reflect -``` - -__Example__ - -Query the state of client `07-tendermint-2` on `ibc-1`: - -```shell -hermes query client state ibc-1 07-tendermint-1 -``` - -```json -Success: ClientState { - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - trust_level: TrustThresholdFraction { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 3s, - frozen_height: Height { - revision: 0, - height: 0, - }, - latest_height: Height { - revision: 2, - height: 3069, - }, - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update_after_expiry: true, - allow_update_after_misbehaviour: true, -} -``` - -## Query the client consensus state - -Use the `query client consensus` command to query the consensus states of a given client, or the state at a specified height: - -```shell -USAGE: - hermes query client consensus - -DESCRIPTION: - Query client consensus state - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -c, --consensus-height CONSENSUS-HEIGHT - -s, --heights-only show only consensus heights - -H, --height HEIGHT the chain height context to be used, applicable only to a specific height -``` - -__Example__ - -Query the states of client `07-tendermint-0` on `ibc-0`: - -```shell -hermes query client consensus ibc-0 07-tendermint-0 
--heights-only -``` - -```json -Success: [ - Height { - revision: 1, - height: 3049, - }, - Height { - revision: 1, - height: 2888, - }, - Height { - revision: 1, - height: 2736, - }, - Height { - revision: 1, - height: 2729, - }, - Height { - revision: 1, - height: 2724, - }, - Height { - revision: 1, - height: 2717, - }, -] -``` - -Query `ibc-0` at height `2800` for the consensus state for height `2724`: - -```shell -hermes query client consensus ibc-0 07-tendermint-0 -c 2724 -H 2800 -``` - -```json -Success: ConsensusState { - timestamp: Time( - 2021-04-13T14:11:20.969154Z - ), - root: CommitmentRoot( - "371DD19003221B60162D42C78FD86ABF95A572F3D9497084584B75F97B05B70C" - ), - next_validators_hash: Hash::Sha256( - 740950668B6705A136D041914FC219045B1D0AD1C6A284C626BF5116005A98A7 - ), -} -``` - -## Query the identifiers of all connections associated with a given client - -Use the `query client connections` command to query the connections associated with a given client: - -```shell -USAGE: - hermes query client connections - -DESCRIPTION: - Query client connections - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - -FLAGS: - -H, --height HEIGHT the chain height which this query should reflect -``` - -__Example__ - -Query the connections of client `07-tendermint-0` on `ibc-0`: - -```shell -hermes query client connections ibc-0 07-tendermint-0 -``` - -```json -Success: [ - ConnectionId("connection-0"), - ConnectionId("connection-1"), -] -``` - -## Query for the header used in a client update at a certain height - -``` -USAGE: - hermes query client header - -DESCRIPTION: - Query for the header used in a client update at a certain height - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - client_id identifier of the client to query - consensus_height height of header to query - -FLAGS: - -H, --height HEIGHT the chain height context for the query -``` - -__Example__ - -Query for the 
header used in the `07-tendermint-0` client update at height 2724 on `ibc-0`: - -```shell -hermes query client header ibc-0 07-tendermint-0 2724 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 0, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 2724, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), -] -``` diff --git a/guide/src/commands/queries/connection.md b/guide/src/commands/queries/connection.md deleted file mode 100644 index 2f2c4ad61f..0000000000 --- a/guide/src/commands/queries/connection.md +++ /dev/null @@ -1,155 +0,0 @@ -# Table of Contents - - - -# Query Connections - -Use the `query connections` command to query the identifiers of all connections on a given chain. - -```shell -USAGE: - hermes query connections - -DESCRIPTION: - Query the identifiers of all connections on a chain - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query -``` - -__Example__ - -Query all connections on `ibc-1`: - -```shell -hermes query connections ibc-1 -``` - -```json -Success: [ - ConnectionId( - "connection-0", - ), - ConnectionId( - "connection-1", - ), -] -``` - -# Query Connection Data - -Use the `query connection` commands to query a specific connection. 
- -```shell -USAGE: - hermes query connection - -DESCRIPTION: - Query information about connection(s) - -SUBCOMMANDS: - end query connection end - channels query connection channels -``` - -## Query the connection end data - -Use the `query connection end` command to query the connection end: - -```shell -USAGE: - hermes query connection end - -DESCRIPTION: - query connection end - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - connection_id identifier of the connection to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query the connection end of connection `connection-1` on `ibc-1`: - -```shell -hermes query connection end ibc-1 connection-1 -``` - -```json -Success: ConnectionEnd { - state: Open, - client_id: ClientId( - "07-tendermint-1", - ), - counterparty: Counterparty { - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - prefix: ibc, - }, - versions: [ - Version { - identifier: "1", - features: [ - "ORDER_ORDERED", - "ORDER_UNORDERED", - ], - }, - ], - delay_period: 0s, -} -``` - -## Query the identifiers of all channels associated with a given connection - -Use the `query connection channels` command to query the identifiers of the channels associated with a given connection: - -```shell -USAGE: - hermes query connection channels - -DESCRIPTION: - query connection channels - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - connection_id identifier of the connection to query -``` - -__Example__ - -Query the channels associated with connection `connection-1` on `ibc-1`: - -```shell -hermes query connection channels ibc-1 connection-1 -``` - -```json -Success: [ - PortChannelId { - channel_id: ChannelId( - "channel-0", - ), - port_id: PortId( - "transfer", - ), - }, - PortChannelId { - channel_id: ChannelId( - "channel-1", - ), - port_id: PortId( - "transfer", - ), - }, -] -``` diff --git 
a/guide/src/commands/queries/index.md b/guide/src/commands/queries/index.md deleted file mode 100644 index 67eeeba748..0000000000 --- a/guide/src/commands/queries/index.md +++ /dev/null @@ -1,35 +0,0 @@ -# Queries - -Hermes supports querying for different objects that exist on a configured chain. - -The `query` command provides the following sub-commands: - -| CLI name | Description | -| ---------------------- | ------------------------------------------------------------------------------ | -| `client` | [Query information about clients](./client.md) | -| `clients` | [Query all clients](./client.md) | -| `connection` | [Query information about connections](./connection.md) | -| `connections` | [Query the identifiers of all connections on a chain](./connection.md) | -| `channel` | [Query information about channels](./channel.md) | -| `channels` | [Query the identifiers of all channels on a given chain](./channel.md) | -| `packet` | [Query information about packets](./packet.md) | -| `tx` | [Query information about transactions](./tx.md) | - -## Usage - -``` -USAGE: - hermes query - -DESCRIPTION: - Query objects from the chain - -SUBCOMMANDS: - client Query information about clients - clients Query clients - connection Query information about connections - connections Query the identifiers of all connections on a chain - channel Query information about channels - channels Query the identifiers of all channels on a given chain - packet Query information about packets -``` diff --git a/guide/src/commands/queries/packet.md b/guide/src/commands/queries/packet.md deleted file mode 100644 index fa7b8a9b5f..0000000000 --- a/guide/src/commands/queries/packet.md +++ /dev/null @@ -1,290 +0,0 @@ - -# Packet Queries - -Use the `query packet` commands to query information about packets. 
- - -```shell -USAGE: - hermes query packet - -OPTIONS: - -h, --help Print help information - -SUBCOMMANDS: - ack Query packet acknowledgment - acks Query packet acknowledgments - commitment Query packet commitment - commitments Query packet commitments - pending Output a summary of pending packets in both directions - unreceived-acks Query unreceived acknowledgments - unreceived-packets Query unreceived packets - help Print this message or the help of the given subcommand(s) -``` - -## Table of Contents - - - - -## Pending Packets - -Use the `query packet pending` command to query the sequence numbers of all packets that have not yet been received or acknowledged, at both ends of a channel. - -```shell -USAGE: - hermes query packet pending - -ARGS: - identifier of the chain at one end of the channel - port identifier on the chain given by - channel identifier on the chain given by -``` - -__Example__ - -Query the sequence numbers of all packets that have either not yet been received or not yet been acknowledged, at both ends of the channel `channel-1`. - -```shell -$ hermes query packet pending ibc-0 transfer channel-1 -``` - -```json -Success: Summary { - forward: PendingPackets { - unreceived_packets: [ - 2203, - ... - 2212, - ], - unreceived_acks: [ - 2183, - ... - 2202, - ], - }, - reverse: PendingPackets { - unreceived_packets: [ - 14, - ... - 23, - ], - unreceived_acks: [ - 4, - ... - 13, - ], - }, -} -``` - - -## Packet Commitments - -Use the `query packet commitments` command to query the sequence numbers of all packets that have been sent but not yet acknowledged (these are the packets that still have their commitments stored). 
- -```shell -USAGE: - hermes query packet commitments - -DESCRIPTION: - Query packet commitments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query -``` - -__Example__ - -Query `ibc-0` for the sequence numbers of packets that still have commitments on `ibc-0` and that were sent on `transfer` port and `channel-0`: - -```shell -hermes query packet commitments ibc-0 transfer channel-0 -``` - -```json -Success: PacketSeqs { - height: Height { - revision: 0, - height: 9154, - }, - seqs: [ - 1, - 2, - 3 - ], -} -``` - -## Packet Commitment with Sequence - -Use the `query packet commitment` command to query the commitment value of a packet with a given sequence number. - -```shell -USAGE: - hermes query packet commitment - -DESCRIPTION: - Query packet commitment - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - sequence sequence of packet to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query `ibc-0` for the commitment of packet with sequence `3` sent on `transfer` port and `channel-0`: - -```shell -hermes query packet commitment ibc-0 transfer channel-0 3 -``` - -```json -Success: "F9458DC7EBEBCD6D18E983FCAB5BD752CC2A74532BBD50B812DB229997739EFC" -``` - -## Packet Acknowledgments - -Use the `query packet acknowledgments` command to query the sequence numbers of all packets that have been acknowledged. 
- -```shell -USAGE: - hermes query packet acks - -DESCRIPTION: - Query packet acknowledgments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query -``` - -__Example__ - -Query `ibc-1` for the sequence numbers of packets acknowledged that were received on `transfer` port and `channel-1`: - -```shell -hermes query packet acks ibc-1 transfer channel-1 -``` - -```json -Success: PacketSeqs { - height: Height { - revision: 1, - height: 9547, - }, - seqs: [ - 1, - 2, - 3 - ], -} -``` - -## Packet Acknowledgment with Sequence - -Use the `query packet acknowledgment` command to query the acknowledgment value of a packet with a given sequence number. - -```shell -USAGE: - hermes query packet ack - -DESCRIPTION: - Query packet acknowledgment - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - port_id identifier of the port to query - channel_id identifier of the channel to query - sequence sequence of packet to query - -FLAGS: - -H, --height HEIGHT height of the state to query -``` - -__Example__ - -Query `ibc-1` for the acknowledgment of packet with sequence `2` received on `transfer` port and `channel-1`: - -```shell -hermes query packet ack ibc-1 transfer channel-1 2 -``` - -```json -Success: "08F7557ED51826FE18D84512BF24EC75001EDBAF2123A477DF72A0A9F3640A7C" -``` - -## Unreceived Packets - -Use the `query packet unreceived-packets` command to query the sequence numbers of all packets that have been sent on the source chain but not yet received on the destination chain. 
- -```shell -USAGE: - hermes query packet unreceived-packets - -DESCRIPTION: - Query unreceived packets - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain for the unreceived sequences - port_id port identifier - channel_id channel identifier -``` - -__Example__ - -Query `transfer` port and `channel-1` on `ibc-1` for the sequence numbers of packets sent on `ibc-0` but not yet received: - -```shell -hermes query packet unreceived-packets ibc-1 transfer channel-1 -``` - -```json -Success: [ - 1, - 2, - 3 -] -``` - -## Unreceived Acknowledgments - -Use the `query packet unreceived-acks` command to query the sequence numbers of all packets that have not yet been acknowledged. - -```shell -USAGE: - hermes query packet unreceived-acks - -DESCRIPTION: - Query unreceived acknowledgments - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query the unreceived acknowledgments - port_id port identifier - channel_id channel identifier -``` - -__Example__ - -Query `transfer` port and `channel-0` on `ibc-0` for the sequence numbers of packets received by `ibc-1` but not yet acknowledged on `ibc-0`: - -```shell -hermes query packet unreceived-acks ibc-0 transfer channel-0 -``` - -```json -Success: [ - 1, - 2, - 3 -] -``` diff --git a/guide/src/commands/queries/tx.md b/guide/src/commands/queries/tx.md deleted file mode 100644 index 7af7c4c2a4..0000000000 --- a/guide/src/commands/queries/tx.md +++ /dev/null @@ -1,60 +0,0 @@ -# Tx Queries - -Use the `query tx` command to query information about transaction(s). - - -```shell -USAGE: - hermes query tx - -DESCRIPTION: - Query information about transactions - -SUBCOMMANDS: - events Query the events emitted by transaction -``` - -## Table of Contents - - - - -## Transaction Events - -Use the `query tx events` command to obtain a list of events that a chain generated as a consequence of -delivering a transaction. 
- -```shell -USAGE: - hermes query tx events - -DESCRIPTION: - Query the events emitted by transaction - -POSITIONAL ARGUMENTS: - chain_id identifier of the chain to query - hash transaction hash to query -``` - -__Example__ - -Query chain `ibc-0` for the events emitted due to transaction with hash -`6EDBBCBCB779F9FC9D6884ACDC4350E69720C4B362E4ACE6C576DE792F837490`: - -```shell -hermes query tx events ibc-0 6EDBBCBCB779F9FC9D6884ACDC4350E69720C4B362E4ACE6C576DE792F837490 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 4, - height: 6628239, - }, - packet: PortId("transfer") ChannelId("channel-139") Sequence(2), - }, - ), -] -``` \ No newline at end of file diff --git a/guide/src/commands/raw/channel-close.md b/guide/src/commands/raw/channel-close.md deleted file mode 100644 index b04befeeb0..0000000000 --- a/guide/src/commands/raw/channel-close.md +++ /dev/null @@ -1,134 +0,0 @@ -# Channel Close Handshake - -The channel close handshake involves two steps: init and confirm. - -## Table of Contents - - - -## Channel Close Init - -Use the `chan-close-init` command to initialize the closure of a channel. 
- -```shell -USAGE: - hermes tx raw chan-close-init - -DESCRIPTION: - Initiate the closing of a channel (ChannelCloseInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -```shell -hermes tx raw chan-close-init ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -```json -Success: CloseInitChannel( - CloseInit( - Attributes { - height: Height { - revision: 0, - height: 77, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-1", - ), - ), - }, - ), -) -``` - -## Channel Close Confirm - -Use the `chan-close-confirm` command to confirm the closure of a channel. 
- -```shell -USAGE: - hermes tx raw chan-close-confirm - -DESCRIPTION: - Confirm the closing of a channel (ChannelCloseConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -```shell -hermes tx raw chan-close-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -```json -Success: CloseConfirmChannel( - CloseConfirm( - Attributes { - height: Height { - revision: 1, - height: 551, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -__NOTE__: The `cosmos-sdk` transfer module implementation does not allow the user (`hermes` in this case) to initiate the closing of channels. -Therefore, when using the Gaia release image, the `chan-close-init` command -fails as the `MsgChannelCloseInit` message included in the transaction is rejected. -To be able to test channel closure, you need to [patch](../../help.md#patching-gaia) your gaia deployments. diff --git a/guide/src/commands/raw/channel-open.md b/guide/src/commands/raw/channel-open.md deleted file mode 100644 index 86bfb569d1..0000000000 --- a/guide/src/commands/raw/channel-open.md +++ /dev/null @@ -1,293 +0,0 @@ -# Channel Open Handshake - -The `tx raw` commands can be used to establish a channel for a given connection. Only `unordered` channels are currently supported. - -
- -```mermaid -sequenceDiagram - autonumber - participant A as ibc-1 - participant B as ibc-0 - Note over A, B: No channel - A->>B: ChannelOpenInit - Note over B: channel: channel-0 - Note over B: channel: counterparty: none - B->>A: ChannelOpenTry - Note over A: channel: channel-1 - Note over A: channel: counterparty: channel-0 - A->>B: ChannelOpenAck - note over B: channel: channel-0 - note over B: counterparty: channel-1 - B->>A: ChannelOpenConfirm - Note over A, B: Channel open -``` - -
- -## Table of Contents - - - -## Channel Open Init - -Use the `chan-open-init` command to initialize a new channel. - -```shell -USAGE: - hermes tx raw chan-open-init - -DESCRIPTION: - Initialize a channel (ChannelOpenInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -o, --order ORDER the channel ordering, valid options 'unordered' (default) and 'ordered' -``` - -__Example__ - -First, let's initialize the channel on `ibc-0` using an existing connection identified by `connection-0`: - -```shell -hermes tx raw chan-open-init ibc-0 ibc-1 connection-0 transfer transfer -``` - -```json -Success: OpenInitChannel( - OpenInit( - Attributes { - height: Height { - revision: 0, - height: 3091 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: None, - }, - ), -) -``` - -A new channel has been initialized on `ibc-1` with identifier `channel-0`. - -> Note that the `counterparty_channel_id` field is currently empty. - - -## Channel Open Try - -Use the `chan-open-try` command to establish a counterparty to the channel on the other chain. 
- -```shell -USAGE: - hermes tx raw chan-open-try - -DESCRIPTION: - Relay the channel attempt (ChannelOpenTry) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -Let's now create the counterparty to `channel-0` on chain `ibc-1`: - -```shell -hermes tx raw chan-open-try ibc-1 ibc-0 connection-1 transfer transfer -s channel-0 -``` - -```json -Success: OpenTryChannel( - OpenTry( - Attributes { - height: Height { - revision: 1, - height: 3213 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -A new channel has been created on `ibc-1` with identifier `channel-1`. - -> Note that the field `counterparty_channel_id` points to the channel on `ibc-0`. - - -## Channel Open Ack - -Use the `chan-open-ack` command to acknowledge the channel on the initial chain. 
- -```shell -USAGE: - hermes tx raw chan-open-ack - -DESCRIPTION: - Relay acknowledgment of a channel attempt (ChannelOpenAck) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -We can now acknowledge on `ibc-0` that `ibc-1` has accepted the opening of the channel: - -```shell -hermes tx raw chan-open-ack ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -```json -Success: OpenAckChannel( - OpenAck( - Attributes { - height: Height { - revision: 0, - height: 3301 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-1", - ), - ), - }, - ), -) -``` - -> Note that the field `counterparty_channel_id` now points to the channel on `ibc-1`. - - -## Channel Open Confirm - -Use the `chan-open-confirm` command to confirm that the channel has been acknowledged, -and finish the handshake, after which the channel is open on both chains. 
- -```shell -USAGE: - hermes tx raw chan-open-confirm - -DESCRIPTION: - Confirm opening of a channel (ChannelOpenConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_conn_id identifier of the destination connection - dst_port_id identifier of the destination port - src_port_id identifier of the source port - -FLAGS: - -d, --dst-chan-id ID identifier of the destination channel (required) - -s, --src-chan-id ID identifier of the source channel (required) -``` - -__Example__ - -Confirm on `ibc-1` that `ibc-0` has accepted the opening of the channel, -after which the channel is open on both chains. - -```shell -hermes tx raw chan-open-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -```json - OpenConfirm( - Attributes { - height: Height { - revision: 1, - height: 3483 - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-1", - ), - ), - connection_id: ConnectionId( - "connection-1", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: Some( - ChannelId( - "channel-0", - ), - ), - }, - ), -) -``` - -We have now successfully opened a channel over an existing connection between the two chains. - diff --git a/guide/src/commands/raw/client.md b/guide/src/commands/raw/client.md deleted file mode 100644 index b0041246ef..0000000000 --- a/guide/src/commands/raw/client.md +++ /dev/null @@ -1,98 +0,0 @@ -# Client -The `tx raw` commands can be used to create and update the on-chain IBC clients. - -## Table of Contents - - -## Create Client -Use the `create-client` command to create a new client. 
- -```shell -USAGE: - hermes tx raw create-client - -DESCRIPTION: - Create a client for source chain on destination chain - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - -``` - -__Example__ - -Create a new client of `ibc-1` on `ibc-0`: - -```shell -hermes tx raw create-client ibc-0 ibc-1 -``` - -```json -{ - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 0, height: 43 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 32 }, - }, - ), - ) -} -``` - -A new client is created with identifier `07-tendermint-0` - -## Update Client -Use the `update-client` command to update an existing client with a new consensus state. -Specific update and trusted heights can be specified. - -```shell -USAGE: - hermes tx raw update-client - -DESCRIPTION: - Update the specified client on destination chain - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - dst_client_id identifier of the client to be updated on destination chain - -FLAGS: - -H, --target-height TARGET-HEIGHT - -t, --trusted-height TRUSTED-HEIGHT -``` - -__Example__ - -Update the client on `ibc-0` with latest header of `ibc-1` - -```shell -hermes tx raw update-client ibc-0 07-tendermint-0 -``` - -```json -Success: UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 0, height: 110 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 109 }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, -) -``` - -The client with identifier `07-tendermint-0` has been updated with the consensus state at height `1-273`. 
diff --git a/guide/src/commands/raw/connection.md b/guide/src/commands/raw/connection.md deleted file mode 100644 index ce20d223fd..0000000000 --- a/guide/src/commands/raw/connection.md +++ /dev/null @@ -1,277 +0,0 @@ -# Connection Handshake - -The `tx raw` commands can be used to establish a connection between two clients. - -
- -```mermaid -sequenceDiagram - autonumber - participant A as ibc-1 - participant B as ibc-0 - Note over A, B: No connection - A->>B: ConnectionOpenInit - Note over B: connection: connection-0 - Note over B: counterparty: none - B->>A: ConnectionOpenTry - Note over A: connection: connection-1 - Note over A: counterparty: connection-0 - A->>B: ConnectionOpenAck - note over B: connection: connection-0 - note over B: counterparty: connection-1 - B->>A: ConnectionOpenConfirm - Note over A, B: Connection open -``` - -
- -## Table of Contents - - - -## Connection Init - -Use the `conn-init` command to initialize a new connection on a chain. - -```shell -USAGE: - hermes tx raw conn-init - -DESCRIPTION: - Initialize a connection (ConnectionOpenInit) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client -``` - -__Example__ - -Given that two clients were previously created with identifier `07-tendermint-0` on chain `ibc-0` and -identifier `07-tendermint-1` on chain `ibc-1`, we can initialize a connection between the two clients. - -First, let's initialize the connection on `ibc-0`: - -```shell -hermes tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -``` - -```json -Success: OpenInitConnection( - OpenInit( - Attributes { - height: Height { - revision: 0, - height: 73, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - }, - ), -) -``` - -A new connection has been initialized on `ibc-0` with identifier `connection-0`. - -> Note that the `counterparty_connection_id` field is currently empty. - - -## Connection Try - -Use the `conn-try` command to establish a counterparty to the connection on the other chain. 
- -```shell -USAGE: - hermes tx raw conn-try - -DESCRIPTION: - Relay the connection attempt (ConnectionOpenTry) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -Let's now create the counterparty to `connection-0` on chain `ibc-1`: - -```shell -hermes tx raw conn-try ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -s connection-0 -``` - -```json -Success: OpenTryConnection( - OpenTry( - Attributes { - height: Height { - revision: 1, - height: 88, - }, - connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` - -A new connection has been created on `ibc-1` with identifier `connection-1`. - -> Note that the field `counterparty_connection_id` points to the connection on `ibc-0`. - - -## Connection Ack - -Use the `conn-ack` command to acknowledge the connection on the initial chain. 
- -```shell -USAGE: - hermes tx raw conn-ack - -DESCRIPTION: - Relay acknowledgment of a connection attempt (ConnectionOpenAck) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -d, --dst-conn-id ID identifier of the destination connection (required) - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -We can now acknowledge on `ibc-0` that `ibc-1` has accepted the connection attempt: - -```shell -hermes tx raw conn-ack ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -d connection-0 -s connection-1 -``` - -```json -Success: OpenAckConnection( - OpenAck( - Attributes { - height: Height { - revision: 0, - height: 206, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-1", - ), - }, - ), -) -``` - -> Note that the field `counterparty_connection_id` now points to the connection on `ibc-1`. - - -## Connection Confirm - -Use the `conn-confirm` command to confirm that the connection has been acknowledged, -and finish the handshake, after which the connection is open on both chains. 
- -```shell -USAGE: - hermes tx raw conn-confirm - -DESCRIPTION: - Confirm opening of a connection (ConnectionOpenConfirm) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - dst_client_id identifier of the destination client - src_client_id identifier of the source client - -FLAGS: - -d, --dst-conn-id ID identifier of the destination connection (required) - -s, --src-conn-id ID identifier of the source connection (required) -``` - -__Example__ - -Confirm on `ibc-1` that `ibc-0` has accepted the connection attempt. - -```shell -hermes tx raw conn-confirm ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -d connection-1 -s connection-0 -``` - -```json -Success: OpenConfirmConnection( - OpenConfirm( - Attributes { - height: Height { - revision: 1, - height: 239, - }, - connection_id: Some( - ConnectionId( - "connection-1", - ), - ), - client_id: ClientId( - "07-tendermint-1", - ), - counterparty_connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` - -We have now successfully established a connection between the two chains. - diff --git a/guide/src/commands/raw/index.md b/guide/src/commands/raw/index.md deleted file mode 100644 index c4c4093015..0000000000 --- a/guide/src/commands/raw/index.md +++ /dev/null @@ -1,85 +0,0 @@ -# Raw Transactions - -There are a number of simple commands that perform minimal validation, build and send IBC transactions. 
- -The `tx raw` command provides the following sub-commands: - -| CLI name | Description | -| ---------------------- | --------------------------------------------------------------------------------------------------------------- | -| `create-client` | [Create a client for source chain on destination chain](./client.md#create-client) | -| `update-client` | [Update the specified client on destination chain](./client.md#update-client) | -| `conn-init` | [Initialize a connection (ConnectionOpenInit)](./connection.md#connection-init) | -| `conn-try` | [Relay the connection attempt (ConnectionOpenTry)](./connection.md#connection-try) | -| `conn-ack` | [Relay acknowledgment of a connection attempt (ConnectionOpenAck)](./connection.md#connection-ack) | -| `conn-confirm` | [Confirm opening of a connection (ConnectionOpenConfirm)](./connection.md#connection-confirm) | -| `chan-open-init` | [Initialize a channel (ChannelOpenInit)](./channel-open.md#channel-open-init) | -| `chan-open-try` | [Relay the channel attempt (ChannelOpenTry)](./channel-open.md#channel-open-try) | -| `chan-open-ack` | [Relay acknowledgment of a channel attempt (ChannelOpenAck)](./channel-open.md#channel-open-ack) | -| `chan-open-confirm` | [Confirm opening of a channel (ChannelOpenConfirm)](./channel-open.md#channel-open-confirm) | -| `chan-close-init` | [Initiate the closing of a channel (ChannelCloseInit)](./channel-close.md#channel-close-init) | -| `chan-close-confirm` | [Confirm the closing of a channel (ChannelCloseConfirm)](./channel-close.md#channel-close-confirm) | -| `ft-transfer` | [Send a fungible token transfer test transaction (ICS20 MsgTransfer)](./packet.md#fungible-token-transfer) | -| `packet-recv` | [Relay receive or timeout packets](./packet.md#relay-receive-and-timeout-packets) | -| `packet-ack` | [Relay acknowledgment packets](./packet.md#relay-acknowledgment-packets) | - -The main purpose of these commands is to support development and testing, and continuous integration. 
These CLIs take quite a few parameters and they are explained in the individual sub-sections. - -At a high level, most commands follow this template: - -```shell -hermes tx raw [-d -s ]* -``` - -In the command template above: - -- `ibc-datagram` - identifies the "main" IBC message that is being sent, e.g. `conn-init`, `conn-try`, `chan-open-init`, etc. To ensure successful processing on the receiving chain, the majority of these commands build and send two messages: one `UpdateClient` message followed by the actual IBC message. These two messages are included in a single transaction. This is done for all IBC datagrams that include proofs collected from the source chain. - - The messages that do not require proofs are: - - `MsgCreateClient` (`create-client` command), - - `MsgConnectionOpenInit` (`conn-init` command), - - `MsgChannelOpenInit` (`chan-open-init` command), - - `MsgChannelCloseInit` (`chan-close-init` command) and - - `MsgTransfer` (`ft-transfer` command) - -- `dst-chain-id` - is the identifier of the chain where the transaction will be sent. - -- `src-chain-id` - is the identifier of the chain that is queried for the data that is included in the transaction, e.g. connection data, client proofs, etc. To ensure correct on-chain state, the relayer also queries the destination chain, however it does not include this information in the Tx to the destination chain. - -- `dst-obj-id` - the identifier of an object on destination chain required by the datagram, e.g. the `client-id` associated with the connection on destination chain in connection datagrams. Or the `connection-id` in a `ConnOpenAck` datagram. - -- `src-obj-id` - the identifier of an object on the source chain, required by the datagram, e.g. the `client-id` of the connection on source chain. 
- -- More details about the `tx raw` commands can be found in the following sections: - - [Client](./client.md) - - [Connection](./connection.md) - - [Channel Open](./channel-open.md) - - [Channel Close](./channel-close.md) - - [Packet](./packet.md) - -## Usage - -```shell -USAGE: - hermes tx raw - -DESCRIPTION: - Raw commands for sending transactions to a configured chain. - -SUBCOMMANDS: - help Get usage information - create-client Create a client for source chain on destination chain - update-client Update the specified client on destination chain - conn-init Initialize a connection (ConnectionOpenInit) - conn-try Relay the connection attempt (ConnectionOpenTry) - conn-ack Relay acknowledgment of a connection attempt (ConnectionOpenAck) - conn-confirm Confirm opening of a connection (ConnectionOpenConfirm) - chan-open-init Initialize a channel (ChannelOpenInit) - chan-open-try Relay the channel attempt (ChannelOpenTry) - chan-open-ack Relay acknowledgment of a channel attempt (ChannelOpenAck) - chan-open-confirm Confirm opening of a channel (ChannelOpenConfirm) - chan-close-init Initiate the closing of a channel (ChannelCloseInit) - chan-close-confirm Confirm the closing of a channel (ChannelCloseConfirm) - ft-transfer Send a fungible token transfer test transaction (ICS20 MsgTransfer) - packet-recv Relay receive or timeout packets - packet-ack Relay acknowledgment packets -``` diff --git a/guide/src/commands/raw/packet.md b/guide/src/commands/raw/packet.md deleted file mode 100644 index e87bbacf29..0000000000 --- a/guide/src/commands/raw/packet.md +++ /dev/null @@ -1,272 +0,0 @@ -# Packet Tx Commands - -## Table of Contents - - - -## Fungible token transfer - -Use the `tx raw ft-transfer` command to send ICS-20 fungible token transfer packets. -__NOTE:__ This command is mainly used for testing the packet features of the relayer. 
- -```shell -USAGE: - hermes tx raw ft-transfer - -DESCRIPTION: - Send a fungible token transfer test transaction (ICS20 MsgTransfer) - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - src_port_id identifier of the source port - src_channel_id identifier of the source channel - amount amount of coins (samoleans, by default) to send (e.g. `100000`) - -FLAGS: - -o, --timeout-height-offset TIMEOUT-HEIGHT-OFFSET timeout in number of blocks since current - -t, --timeout-seconds TIMEOUT-SECONDS timeout in seconds since current - -r, --receiver RECEIVER receiving account address on the destination chain - -d, --denom DENOM denomination of the coins to send (default: samoleans) - -n, --number-msgs NUMBER-MSGS number of messages to send - -k, --key KEY use the given signing key (default: `key_name` config) -``` - -__Example__ - -Send two transfer packets from the `transfer` module and `channel-0` of `ibc-0` to `ibc-1`. Each transfer if for `9999` samoleans (default denomination) and a timeout offset of `10` blocks. The transfer fee is paid by the relayer account on `ibc-1`. - -```shell -hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 2 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 431, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 431, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - }, - ), -] -``` - -The transfer packets are stored on `ibc-0` and can be relayed. - -> To send transfer packets with a custom receiver address use the `--receiver | -r` flag. 
- -```shell -hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 1 -r board:1938586739 -``` - -```json -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 546, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(7), - }, - ), -] -``` - -## Relay receive and timeout packets - -Use the `tx raw packet-recv` command to relay the packets sent but not yet received. If the sent packets have timed out then a timeout packet is sent to the source chain. - -```shell -USAGE: - hermes tx raw packet-recv - -DESCRIPTION: - Relay receive or timeout packets - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - src_port_id identifier of the source port - src_channel_id identifier of the source channel -``` - -__Example__ - -Send the two transfer packets to the `ibc-1` module bound to the `transfer` port and the `channel-0`'s counterparty. - -__NOTE__: The relayer prepends a client update message before the receive messages. 
- -```shell -hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 1, - height: 439, - }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 449, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 1, - height: 439, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - ack: [ - 123, - 34, - 114, - 101, - 115, - 117, - 108, - 116, - 34, - 58, - 34, - 65, - 81, - 61, - 61, - 34, - 125, - ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 1, - height: 439, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - ack: [ - 123, - 34, - 114, - 101, - 115, - 117, - 108, - 116, - 34, - 58, - 34, - 65, - 81, - 61, - 61, - 34, - 125, - ], - }, - ), -] -``` - -Both packets have been relayed to `ibc-1` and acknowledged. - -## Relay acknowledgment packets - -Use the `tx raw packet-ack` command to relay acknowledgments to the original source of the packets. - -```shell -USAGE: - hermes tx raw packet-ack - -DESCRIPTION: - Relay acknowledgment packets - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - src_chain_id identifier of the source chain - src_port_id identifier of the source port - src_channel_id identifier of the source channel -``` - -__Example__ - -Send the acknowledgments to the `ibc-0` module bound to the `transfer` port and the `channel-1`'s counterparty. - -__NOTE__: The relayer prepends a client update message before the acknowledgments. 
- -```shell -hermes tx raw packet-ack ibc-0 ibc-1 transfer channel-1 -``` - -```json -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 495, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 483, - }, - }, - header: Some( - Tendermint(...), - ), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 495, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 495, - }, - packet: PortId("transfer") ChannelId("channel-0") Sequence(5), - }, - ), -] -``` - -Both acknowledgments have been received on `ibc-0`. diff --git a/guide/src/commands/relaying/clear.md b/guide/src/commands/relaying/clear.md deleted file mode 100644 index a9f2c581cf..0000000000 --- a/guide/src/commands/relaying/clear.md +++ /dev/null @@ -1,442 +0,0 @@ -# Clearing Packets - -## `clear packets` - -This command clears outstanding packets on a given channel in both directions, -by issuing the appropriate [packet-recvs](../raw/packet.md#relay-receive-and-timeout-packets) -and [packet-acks](../raw/packet.md#relay-acknowledgment-packets). - -### Usage - -``` -Clear outstanding packets (i.e. packet-recv and packet-ack) on a given channel in both directions. - -The channel is identified by the chain, port, and channel IDs at one of its ends - -USAGE: - hermes clear packets - -ARGS: - identifier of the chain - identifier of the port - identifier of the channel - -OPTIONS: - -h, --help Print help information -``` - -### Example - -1. 
Without Hermes running, send 3 packets over a channel, here `channel-13`: - -``` -❯ hermes tx raw ft-transfer ibc1 ibc0 transfer channel-13 9999 -o 1000 -n 3 -2022-02-24T14:16:28.295526Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:16:28.330860Z INFO ThreadId(15) send_tx{id=ibc0}: refresh: retrieved account sequence=61 number=1 -2022-02-24T14:16:28.350022Z INFO ThreadId(15) wait_for_block_commits: waiting for commit of tx hashes(s) AE4C3186778488E45670EB7303FA77E69B39F4E7C7494B05EC51E55136A373D6 id=ibc0 -Success: [ - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - SendPacket( - SendPacket { - height: Height { - revision: 0, - height: 86208, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... 
], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), -] -``` - -2. Because the relayer is not running these packets won't be relayed, -as can be seen with the `query packet unreceived-packets` command: - -``` -❯ hermes query packet unreceived-packets ibc1 transfer channel-13 -2022-02-24T14:21:28.874190Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -Success: [ - 14, - 15, - 16, -] -``` - -3. We can clear them manually using the `clear packets` command: - -``` -❯ hermes clear packets ibc0 transfer channel-13 -2022-02-24T14:17:25.748422Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:17:25.799704Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}: found unprocessed SendPacket events for [Sequence(14), Sequence(15), Sequence(16)] (first 10 shown here; total=3) -2022-02-24T14:17:25.827177Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}: ready to fetch a scheduled op. 
data with batch of size 3 targeting Destination -2022-02-24T14:17:26.504798Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: assembled batch of 4 message(s) -2022-02-24T14:17:26.508873Z INFO ThreadId(29) send_tx{id=ibc1}: refresh: retrieved account sequence=54 number=1 -2022-02-24T14:17:26.561715Z INFO ThreadId(29) wait_for_block_commits: waiting for commit of tx hashes(s) 07AA83524257105CC476063932A560893BE8F4E94C679BFD00F970FC248647E0 id=ibc1 -2022-02-24T14:17:31.948950Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: [Sync->ibc1] result events: - UpdateClientEv(h: 0-86215, cs_h: 07-tendermint-3(0-86219)) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:14, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:15, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - WriteAcknowledgementEv(WriteAcknowledgement - h:0-86215, seq:16, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - - -2022-02-24T14:17:31.949192Z INFO ThreadId(01) PacketRecvCmd{src_chain=ibc0 src_port=transfer src_channel=channel-13 dst_chain=ibc1}:relay{odata=E96CV_cA5P ->Destination @0-86218; len=3}: success -2022-02-24T14:17:31.989215Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}: found unprocessed WriteAcknowledgement events for [Sequence(14), Sequence(15), Sequence(16)] (first 10 shown here; total=3) -2022-02-24T14:17:32.013500Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}: ready to fetch a scheduled op. 
data with batch of size 3 targeting Destination -2022-02-24T14:17:33.211930Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: assembled batch of 4 message(s) -2022-02-24T14:17:33.215803Z INFO ThreadId(15) send_tx{id=ibc0}: refresh: retrieved account sequence=62 number=1 -2022-02-24T14:17:33.245229Z INFO ThreadId(15) wait_for_block_commits: waiting for commit of tx hashes(s) 62C69B1C46AF45182D5D99C6CB5EB301F8A402726772BA4EE067B18C68F2A4D6 id=ibc0 -2022-02-24T14:17:37.465489Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: [Sync->ibc0] result events: - UpdateClientEv(h: 0-86221, cs_h: 07-tendermint-3(0-86216)) - AcknowledgePacketEv(h:0-86221, seq:14, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - AcknowledgePacketEv(h:0-86221, seq:15, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - AcknowledgePacketEv(h:0-86221, seq:16, path:channel-13/transfer->channel-12/transfer, toh:0-87203, tos:Timestamp(NoTimestamp))) - - -2022-02-24T14:17:37.465802Z INFO ThreadId(01) PacketAckCmd{src_chain=ibc1 src_port=transfer src_channel=channel-12 dst_chain=ibc0}:relay{odata=L4fnSXkxL_ ->Destination @0-86215; len=3}: success -Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 86215, - }, - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 86219, - }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - 
"channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... ], - }, - ), - WriteAcknowledgement( - WriteAcknowledgement { - height: Height { - revision: 0, - height: 86215, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [ ... ], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - ack: [ ... 
], - }, - ), - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { - revision: 0, - height: 86221, - }, - client_id: ClientId( - "07-tendermint-3", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 86216, - }, - }, - header: Some( - Tendermint( - Header {...}, - ), - ), - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 14, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 15, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), - AcknowledgePacket( - AcknowledgePacket { - height: Height { - revision: 0, - height: 86221, - }, - packet: Packet { - sequence: Sequence( - 16, - ), - source_port: PortId( - "transfer", - ), - source_channel: ChannelId( - "channel-13", - ), - destination_port: PortId( - "transfer", - ), - destination_channel: ChannelId( - "channel-12", - ), - data: [], - timeout_height: Height { - revision: 0, - height: 87203, - }, - timeout_timestamp: Timestamp { - time: None, - }, - }, - }, - ), -] -``` - -4. 
The packets have now been successfully relayed: - -``` -❯ hermes query packet unreceived-packets ibc1 transfer channel-13 -2022-02-24T14:21:28.874190Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -Success: [] -``` - diff --git a/guide/src/commands/relaying/handshakes.md b/guide/src/commands/relaying/handshakes.md deleted file mode 100644 index a504cffafd..0000000000 --- a/guide/src/commands/relaying/handshakes.md +++ /dev/null @@ -1,59 +0,0 @@ -# Relaying of Handshake Messages - -This section describes the configuration and commands that can be used to start the relayer and relay both handshake and packets -for connections and channels. - -## The `start` Command - -To relay packets and handshake messages configure the `mode` section of the configuration file like so: -```toml -[global] -log_level = 'info' - -[mode] - -[mode.clients] -enabled = true -# ... - -[mode.connections] -enabled = true - -[mode.channels] -enabled = true - -[mode.packets] -enabled = true -# ... -``` - -Then start hermes using the start command: - -```shell -hermes start -``` - -The relayer sends handshake and packet transactions triggered by IBC events. - -## Completing Channel Handshakes - -After the relayer is started using the `start` command, it scans the chain state and will resume the handshake for any -channels or connections that are not in open state. It then listens to IBC events emitted by any of -the configured chains. - -Assuming the events are coming from a `source` chain, the relayer determines the `destination` chain and builds the handshake messages based on these events. These are then sent to the `destination` chain. 
- -In addition to the events described in [Packet Relaying](packets.md#packet-relaying), the following IBC events may be handled: - -- Channels (if `mode.channels.enabled=true`): - - `chan_open_init`: the relayer builds a `MsgChannelOpenTry` message - - `chan_open_try`: the relayer builds a `MsgChannelOpenAck` message - - `chan_open_ack`: the relayer builds a `MsgChannelOpenConfirm` message - - `chan_open_confirm`: no message is sent out, channel opening is finished - -- Connections (if `mode.connections.enabled=true`): - - `conn_open_init`: the relayer builds a `MsgConnOpenTry` message - - `conn_open_try`: the relayer builds a `MsgConnOpenAck` message - - `conn_open_ack`: the relayer builds a `MsgConnOpenConfirm` message - - `conn_open_confirm`: no message is sent out, connection opening is finished - diff --git a/guide/src/commands/relaying/index.md b/guide/src/commands/relaying/index.md deleted file mode 100644 index c9d4d06b06..0000000000 --- a/guide/src/commands/relaying/index.md +++ /dev/null @@ -1,20 +0,0 @@ -# Relaying -This section describes the types of relaying that hermes can perform. - -Hermes can send transactions triggered by IBC events. It currently handles channel handshake and packet events: - - [packet messages only](./packets.md#packet-relaying) - - [channel and packet messages](./handshakes.md) - -## The `start` Command - -The `start` command can be used to start hermes in IBC event listen mode. - -```shell -USAGE: - hermes start - -DESCRIPTION: - Start the relayer in multi-chain mode. Relays packets and channel handshake messages between all chains in the config. -``` - -As described in the next sub-sections, the type of relaying can be configured in the `global` section of the configuration file, by specifying different values in the `strategy` field.
diff --git a/guide/src/commands/relaying/packets.md b/guide/src/commands/relaying/packets.md deleted file mode 100644 index 760e51297d..0000000000 --- a/guide/src/commands/relaying/packets.md +++ /dev/null @@ -1,64 +0,0 @@ -# Packet Relaying - -This section describes the configuration and commands that can be used to start the relayer and relay packets over one or multiple paths. - -## Table of Contents - - - -## The `start` Command - -To relay packets only, configure the `mode` section of the configuration file like so: -```toml -[global] -log_level = 'info' - -[mode] - -[mode.clients] -enabled = true -# ... - -[mode.connections] -enabled = false - -[mode.channels] -enabled = false - -[mode.packets] -enabled = true -# ... -``` - -Then start hermes using the start command: - -```shell -hermes start -``` - -The relayer sends packet transactions triggered by IBC packet events for all open channels between the configured chains. -This is also referred to as packet streaming. - -## Packet Streaming - -After the relayer is started using the `start` command, it listens to IBC packet events emitted by any of -the configured chains. Assuming the events are coming from a `source` chain, the relayer builds packets -based on these events, packets that are then sent either to the `source` chain or the counterparty (`destination`) chain. - -Current events and actions are: - -- `send_packet`: the relayer builds a packet message with the `packet` obtained from the event and any required proofs obtained from the counterparty of the chain where the message is sent. The concrete packet is: - - `MsgRecvPacket`, sent to `destination` chain if the channel is in open state on the `destination` chain, and a timeout has not occurred, - - `MsgTimeout`, sent to the `source` chain if the channel is in open state on the `destination` chain, but a timeout has occurred. - - `MsgTimeoutOnClose`, sent to the `source` chain if the channel is in closed state on the `destination` chain.
-- `write_acknowledgement`: the relayer builds a `MsgAcknowledgement` packet that is sent to the `destination` chain. - -In addition to these events, the relayer will also handle channel closing events: -- `chan_close_init`: the relayer builds a `MsgChannelCloseConfirm` and sends it to the `destination` chain - -## Packet Delay - -If the relay path is using a non-zero delay connection, then `hermes` will delay all packet transactions. The delay is relative to the submission time for the client update at the height required by the packet proof. -The delay is used to prevent light client attacks and ensures that misbehavior detection finalizes before the transaction is submitted. -For more information on the misbehavior detector see [the misbehaviour section](../misbehaviour/index.md#monitoring-misbehaviour-and-evidence-submission). - diff --git a/guide/src/commands/upgrade/index.md b/guide/src/commands/upgrade/index.md deleted file mode 100644 index 135d9c137e..0000000000 --- a/guide/src/commands/upgrade/index.md +++ /dev/null @@ -1,21 +0,0 @@ -# Client Upgrade - -## Client Upgrade Command - -Use the `upgrade client` command to upgrade a client after a chain upgrade. - -```shell -USAGE: - hermes upgrade client - -DESCRIPTION: - Upgrade an IBC client - -POSITIONAL ARGUMENTS: - dst_chain_id identifier of the destination chain - dst_client_id identifier of the client to be upgraded on destination chain -``` - -__Example__ - -Here is [an example](./test.md) of a chain upgrade proposal submission and client upgrade. 
diff --git a/guide/src/commands/upgrade/test.md b/guide/src/commands/upgrade/test.md deleted file mode 100644 index 56a2f3ccb3..0000000000 --- a/guide/src/commands/upgrade/test.md +++ /dev/null @@ -1,232 +0,0 @@ -# Testing Client Upgrade - -## Prerequisites - -- gaiad `(v4.2.*)`, for example: - -```shell -gaiad version --log_level error --long | head -n4 -``` - -```shell -name: gaia -server_name: gaiad -version: v4.2.0 -commit: 535be14a8bdbfeb0d950914b5baa2dc72c6b081c -``` - -## Testing procedure - -1. Start two gaia instances and initialize hermes: - - ```shell - ./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 - ``` - The `one-chain` script is invoked for each chain and modifies the `genesis.json` file to use a short window for governance proposals (`200s` for `max_deposit_period` and `voting_period`). Therefore, an upgrade proposal can be submitted, voted on and accepted within a short time. - -2. Create one client on `ibc-1` for `ibc-0`: - - ```shell - hermes create client ibc-1 ibc-0 - ``` - - ```json - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 1, height: 9 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 18 }, - }, - ), - ) - ``` - -3. Create and submit an upgrade plan for chain `ibc-0`: - - Use the hermes test command to make an upgrade proposal. In the example below a software upgrade proposal is made for `ibc-0`, for the height `300` blocks from latest height. `10000000stake` is deposited. - The proposal includes the upgraded client state constructed from the state of `07-tendermint-0` client on `ibc-1` that was created in the previous step. 
In addition, the `unbonding_period` of the client is set to some new value (`400h`) - - ```shell - hermes tx raw upgrade-chain ibc-0 ibc-1 07-tendermint-0 10000000 300 - ``` - - ```text - Success: transaction::Hash(CE98D8D98091BA8016BD852D18056E54C4CB3C4525E7F40DD3C40B4FD0F2482B) - ``` - - Note that the height offset should be picked such that the proposal plan height is reached after the `200s` voting period. - - 4. Verify that the proposal was accepted: - - Query the upgrade plan to check that it was submitted correctly. Note the `height` at which the proposal will take effect (chain halts). Also `status: PROPOSAL_STATUS_VOTING_PERIOD`. - - ```shell - gaiad query gov proposal 1 --home data/ibc-0/ - ``` - - ```text - content: - '@type': /cosmos.upgrade.v1beta1.SoftwareUpgradeProposal - description: upgrade the chain software and unbonding period - plan: - height: "332" - info: upgrade the chain software and unbonding period - name: test - time: "0001-01-01T00:00:00Z" - upgraded_client_state: - '@type': /ibc.lightclients.tendermint.v1.ClientState - allow_update_after_expiry: true - allow_update_after_misbehaviour: true - chain_id: ibc-0 - frozen_height: - revision_height: "0" - revision_number: "0" - latest_height: - revision_height: "333" - revision_number: "0" - max_clock_drift: 0s - proof_specs: - - inner_spec: - child_order: - - 0 - - 1 - child_size: 33 - empty_child: null - hash: SHA256 - max_prefix_length: 12 - min_prefix_length: 4 - leaf_spec: - hash: SHA256 - length: VAR_PROTO - prefix: AA== - prehash_key: NO_HASH - prehash_value: SHA256 - max_depth: 0 - min_depth: 0 - - inner_spec: - child_order: - - 0 - - 1 - child_size: 32 - empty_child: null - hash: SHA256 - max_prefix_length: 1 - min_prefix_length: 1 - leaf_spec: - hash: SHA256 - length: VAR_PROTO - prefix: AA== - prehash_key: NO_HASH - prehash_value: SHA256 - max_depth: 0 - min_depth: 0 - trust_level: - denominator: "0" - numerator: "0" - trusting_period: 0s - unbonding_period: 1440000s - 
upgrade_path: - - upgrade - - upgradedIBCState - title: upgrade_ibc_clients - deposit_end_time: "2021-04-12T16:33:37.187389Z" - final_tally_result: - abstain: "0" - "no": "0" - no_with_veto: "0" - "yes": "0" - proposal_id: "1" - status: PROPOSAL_STATUS_VOTING_PERIOD - submit_time: "2021-04-12T16:30:17.187389Z" - total_deposit: - - amount: "10000000" - denom: stake - voting_end_time: "2021-04-12T16:33:37.187389Z" - voting_start_time: "2021-04-12T16:30:17.187389Z" - ``` - - 5. Vote on the proposal - - The parameter `1` should match the `proposal_id:` from the upgrade proposal submitted at step 3. - This command must be issued while the proposal status is `PROPOSAL_STATUS_VOTING_PERIOD`. Confirm transaction when prompted. - - ```shell - gaiad tx gov vote 1 yes --home data/ibc-0/data/ --keyring-backend test --keyring-dir data/ibc-0/ --chain-id ibc-0 --from validator - ``` - - ```text - confirm transaction before signing and broadcasting [y/N]: y - - {"height":"85","txhash":"AC24D80B1BFE0832769DECFDD3B3DF999A363D5E4390B0B673344FFDED9150B2","codespace":"","code":0,"data":"0A060A04766F7465","raw_log":"[{\"events\":[{\"type\":\"message\",\"attributes\":[{\"key\":\"action\",\"value\":\"vote\"},{\"key\":\"module\",\"value\":\"governance\"},{\"key\":\"sender\",\"value\":\"cosmos1srfzw0jkyyn7wf0ps4zy0tuvdaclfj2ufgp6w3\"}]},{\"type\":\"proposal_vote\",\"attributes\":[{\"key\":\"option\",\"value\":\"VOTE_OPTION_YES\"},{\"key\":\"proposal_id\",\"value\":\"1\"}]}]}]","logs":[{"msg_index":0,"log":"","events":[{"type":"message","attributes":[{"key":"action","value":"vote"},{"key":"module","value":"governance"},{"key":"sender","value":"cosmos1srfzw0jkyyn7wf0ps4zy0tuvdaclfj2ufgp6w3"}]},{"type":"proposal_vote","attributes":[{"key":"option","value":"VOTE_OPTION_YES"},{"key":"proposal_id","value":"1"}]}]}],"info":"","gas_wanted":"200000","gas_used":"43716","tx":null,"timestamp":""} - ``` - - 6. Wait approximately 200 seconds until the proposal changes status to `PROPOSAL_STATUS_PASSED`. 
- Note the `final tally_result` that includes the vote submitted in the previous step. - - ```shell - gaiad query gov proposal 1 --home data/ibc-0/ - ``` - - ```text - content: - '@type': /cosmos.upgrade.v1beta1.SoftwareUpgradeProposal - description: upgrade the chain software and unbonding period - ... - final_tally_result: - abstain: "0" - "no": "0" - no_with_veto: "0" - "yes": "100000000000" - proposal_id: "1" - status: PROPOSAL_STATUS_PASSED - submit_time: "2021-04-12T16:30:17.187389Z" - total_deposit: - - amount: "10000000" - denom: stake - voting_end_time: "2021-04-12T16:33:37.187389Z" - voting_start_time: "2021-04-12T16:30:17.187389Z" - ``` - -6. Test the `upgrade client` CLI - - The following command performs the upgrade for client `07-tendermint-0`. It outputs two events, one for the updated client state, - and another for the upgraded state. - - ```shell - hermes upgrade client ibc-1 07-tendermint-0 - ``` - ```json - Success: [ - UpdateClient( - UpdateClient { - common: Attributes { - height: Height { revision: 1, height: 438 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 440 }, - }, - header: Some( - Tendermint(..) - ), - }, - ), - UpgradeClient( - UpgradeClient( - Attributes { - height: Height { revision: 1, height: 438 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 0, height: 441 }, - }, - ), - ), - ] - ``` diff --git a/guide/src/config.md b/guide/src/config.md deleted file mode 100644 index cdde0904bd..0000000000 --- a/guide/src/config.md +++ /dev/null @@ -1,183 +0,0 @@ -# Configuration - -In order to run Hermes, you will need to have a configuration file. - -The format supported for the configuration file is [TOML](https://toml.io/en/). - -By default, Hermes expects the configuration file to be located at `$HOME/.hermes/config.toml`. 
- -This can be overridden by supplying the `-c` flag when invoking `hermes`, before the -name of the command to run, eg. `hermes -c my_config.toml query connection channels ibc-1 connection-1`. - -> The current version of Hermes does not support managing the configuration file programmatically. -> You will need to use a text editor to create the file and add content to it. - -```bash -hermes [-c CONFIG_FILE] COMMAND -``` - -## Table of contents - - - -## Configuration - -The configuration file must have one `global` section, and one `chains` section for each chain. - -> **Note:** As of 0.6.0, the Hermes configuration file is self-documented. -> Please read the configuration file [`config.toml`](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml) -> itself for the most up-to-date documentation of parameters. - -By default, Hermes will relay on all channels available between all the configured chains. -In this way, every configured chain will act as a source (in the sense that Hermes listens for events) -and as a destination (to relay packets that others chains have sent). - -For example, if there are only two chains configured, then Hermes will only relay packets between those two, -i.e. the two chains will serve as a source for each other, and likewise as a destination for each other's relevant events. -Hermes will ignore all events that pertain to chains which are unknown (ie. not present in config.toml). - -To restrict relaying on specific channels, or uni-directionally, you can use [packet filtering policies](https://github.com/informalsystems/ibc-rs/blob/v0.15.0/config.toml#L207-L224). - -## Adding private keys - -For each chain configured you need to add a private key for that chain in order to submit [transactions](./commands/raw/index.md), -please refer to the [Keys](./commands/keys/index.md) sections in order to learn how to add the private keys that are used by the relayer. 
- -## Connecting via TLS - -Hermes supports connection via TLS for use-cases such as connecting from behind -a proxy or a load balancer. In order to enable this, you'll want to set the -`rpc_addr`, `grpc_addr`, or `websocket_addr` parameters to specify a TLS -connection via HTTPS using the following scheme (note that the port number 443 -is just used for example): -``` -rpc_addr = 'https://domain.com:443' -grpc_addr = 'https://domain.com:443' -websocket_addr = 'wss://domain.com:443/websocket' -``` - -## Support for Interchain Accounts - -As of version 0.13.0, Hermes supports relaying on [Interchain Accounts][ica] channels. - -If the `packet_filter` option in the chain configuration is disabled, then -Hermes will relay on all existing and future channels, including ICA channels. - -There are two kinds of ICA channels: - -1. The host channels, whose port is `icahost` -2. The controller channels, whose port starts with `icacontroller-` followed - by the owner account address. [See the spec for more details][ica]. - -If you wish to only relay on a few specific standard channels (here `channel-0` and `channel-1`), -but also relay on all ICA channels, you can specify the following packet filter: - -> Note the use of wildcards in the port and channel identifiers (`['ica*', '*']`) -> to match over all the possible ICA ports. - -```toml -[chains.packet_filter] -policy = 'allow' -list = [ - ['ica*', '*'], # allow relaying on all channels whose port starts with `ica` - ['transfer', 'channel-0'], - ['transfer', 'channel-1'], - # Add any other port/channel pairs you wish to relay on -] -``` - -If you wish to relay on all channels but not on ICA channels, you can use -the following packet filter configuration: - -```toml -[chains.packet_filter] -policy = 'deny' -list = [ - ['ica*', '*'], # deny relaying on all channels whose port starts with `ica` -] -``` - -## Update the configuration without restarting Hermes - -> ⚠️ This feature has been removed in Hermes v0.12.0. 
- -Before Hermes 0.6.1, the only way to get Hermes to pick up a change in the -configuration was to stop and restart Hermes. - -As of version 0.6.1, Hermes will react to receiving a `SIGHUP` signal -by reloading the `[chains]` section of the configuration, and -stopping, starting or restarting the affected workers. - -> **Warning:** the configuration reload feature only supports -> adding, removing, or updating configuration of chains. It does -> not support dynamically changing global features, such as the -> filtering mechanism or logging level. - -For example, say you start with the configuration given in the previous section -in `~/.hermes/config.toml`, ie. with two chains `ibc-0` and `ibc-1`. - -1. Start three chains `ibc-0`, `ibc-1` and `ibc-2`: - - ```shell - ./scripts/dev-env ibc-0 ibc-1 ibc-2 - ``` - -2. Start Hermes - - ```shell - hermes start - ``` - -3. Add the configuration for the chain `ibc-2` to the configuration file: - - ```toml - [[chains]] - id = 'ibc-2' - rpc_addr = 'http://127.0.0.1:26457' - grpc_addr = 'http://127.0.0.1:9092' - websocket_addr = 'ws://127.0.0.1:26457/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 20000000 - gas_price = { price = 0.001, denom = 'stake' } - clock_drift = '5s' - trusting_period = '14days' - ``` - -4. Change the configuration of the chain `ibc-0`, eg. the `max_gas` property. - -5. Send a `SIGHUP` signal to the `hermes` process: - - > ⚠️ **Warning:** the command below will send a `SIGHUP` signal to the first - > process in the list emitted by `ps aux` which contains the string `hermes`. - > Alternatively, you can look up the process ID (PID) of the `hermes` process - > you want to target and use `kill -SIGHUP PID`. - - ```shell - ps aux | rg hermes | awk '{ print $2 }' | head -n1 | xargs -I{} kill -SIGHUP {} - ``` - -6. Watch the output of Hermes, it will show that Hermes has picked up the changes in - the config. 
Hermes is now relaying between the three chains and using the new - maximum amount of gas specified for `ibc-0`. - - ``` - ... - - INFO reloading configuration (triggered by SIGHUP) - INFO configuration successfully reloaded - INFO updating existing chain chain.id=ibc-1 - INFO adding new chain chain.id=ibc-2 - ``` - -To make sure Hermes ends up in the expected state, check out the documentation -on [inspecting the relayer state](help.md#inspecting-the-relayer-state). - -## Next steps - -Now that you learned how to build the relayer and how to create a configuration file, you can go to the [`Two Chains`](./tutorials/local-chains/index.md) tutorial to learn how to perform some local testing connecting the relayer to two local chains. - -[log-level]: ./help.md#parametrizing-the-log-output-level -[ica]: https://github.com/cosmos/ibc/blob/master/spec/app/ics-027-interchain-accounts/README.md diff --git a/guide/src/example-config.md b/guide/src/example-config.md deleted file mode 100644 index a6a1095f3a..0000000000 --- a/guide/src/example-config.md +++ /dev/null @@ -1,7 +0,0 @@ -# Example Configuration File - -Here is a full example of a configuration file with two chains configured: - -```toml -{{#include ../../config.toml}} -``` diff --git a/guide/src/features.md b/guide/src/features.md deleted file mode 100644 index 5dcb606a2f..0000000000 --- a/guide/src/features.md +++ /dev/null @@ -1,62 +0,0 @@ -# Features - -This section includes a summary of the supported and planned features. -A feature matrix and comparison between the Rust and Go relayer implementations can be found in the [Feature Matrix](./features/matrix.md) - -> **Cosmos SDK compatibility:** -> Hermes supports Cosmos SDK chains implementing the [IBC v1.1][ibcv1] protocol specification. -> Cosmos SDK versions `0.41.3` to `0.44.x` are officially supported. -> In case Hermes finds an incompatible SDK version, it will output a log warning. 
- -[ibcv1]: https://github.com/cosmos/ibc-go - -## Supported Features - -- Basic features - - create and update clients - - refresh clients to prevent expiration - - establish connections with new or existing clients - - establish channels with new or existing connection - - channel closing handshake - - relay packets, acknowledgments, timeout and timeout-on-close packets, with zero or non-zero delay. - - queries for all objects -- Packet relaying over: - - multiple paths, for the chains in `config.toml` -- Restart support - - clear packets - - resume channel handshake if configured to relay `all` - - resume connection handshake if configured to relay `all` -- Client upgrade - - upgrading clients after a counterparty chain has performed an upgrade for IBC breaking changes -- Packet delay: - - establish path over non-zero delay connection - - relay all packets with the specified delay -- Monitor and submit misbehaviour for clients - - monitor client updates for misbehaviour (fork and BFT time violation) - - submit misbehaviour evidence to the on-chain IBC client. 
- > misbehaviour submission to full node not yet supported -- Individual commands that build and send transactions for: - - creating and updating IBC Tendermint light clients - - sending connection open handshake datagrams - - sending channel open handshake datagrams - - sending channel closing handshake datagrams - - initiating a cross chain transfer (mainly for testing) - - relaying sent packets, acknowledgments and timeouts - - client upgrade -- Channel handshake for existing channel that is not in `Open` state -- Connection handshake for existing connection that is not in `Open` state -- Telemetry support - -## Upcoming / Unsupported Features - -Planned features: -- Full Passive mode: relay from all IBC events - - Connection handshake for existing connection that is not in `Open` state -- Relayer support for management application (add RPC server) -- Dynamic configuration management - -Not planned: -- Relayer management application -- Create clients with user chosen parameters (such as UpgradePath) -- Use IBC light clients other than Tendermint such as Solo Machine -- Support non cosmos-SDK chains diff --git a/guide/src/features/matrix.md b/guide/src/features/matrix.md deleted file mode 100644 index 77573acaf8..0000000000 --- a/guide/src/features/matrix.md +++ /dev/null @@ -1,65 +0,0 @@ -# Feature Matrix -This section gives more details about the features and implementation status -of Hermes in comparison with the [cosmos-go-relayer]. 
- -__Legend__: - -| Term | Description | -| ----- | ----------- | -| ❌ | feature not supported | -| ✅ | feature is supported | -| `Chain` | chain related | -| `Cl` | client related | -| `Conn` | connection related | -| `Chan` | channel related | -| `.._Handshake_..` | can execute all transactions required to finish a handshake from a single command | -| `..__A` | building and sending `msg` from a command that scans chain state | -| `..__P` | building and sending `msg` from IBC event; doesn't apply to `.._Init` and `FT_Transfer` features | - -__Feature comparison between Hermes and the Go relayer__ - -| Features \ Status | Hermes | Cosmos Go | Feature Details | -| ---------------------- | :---: | :----: |:-------| -| Restart | ✅ | ✅ | replays any IBC events that happened before restart -| Multiple_Paths | ✅ | ✅ | relays on multiple paths concurrently -| | | | -| Connection Delay | ✅ | ❌ | -| Cl_Misbehavior | ✅ | ❌ | monitors and submits IBC client misbehavior -| Cl_Refresh | ✅ | ❌ | periodically refresh an on-chain client to prevent expiration -| Packet Delay | ✅ | ❌ | -| | | | -| Chan_Unordered | ✅ | ✅ | -| Chan_Ordered | ✅ | ❓ | -| | | | -| Cl_Tendermint_Create | ✅ | ✅ | tendermint light client creation -| Cl_Tendermint_Update | ✅ | ✅ | tendermint light client update -| Cl_Tendermint_Upgrade | ✅ | ✅ | tendermint light client upgrade -| | | | -| Conn_Open_Handshake_A | ✅ | ✅ | -| Conn_Open_Handshake_P | ✅ | ❌ | -| | | | -| Chan_Open_Handshake_A | ✅ | ✅ | -| Chan_Open_Handshake_P | ✅ | ❌ | -| Chan_Open_Handshake_Optimistic | ❌ | ❌ | open a channel on a non-Open connection -| | | | -| Chan_Close_Handshake_P | ✅ | ✅ | -| Chan_Close_Handshake_A | ✅ | ❌ | -| | | | -| FT_Transfer | ✅ | ✅ | can submit an ICS-20 fungible token transfer message -| ICA_Relay | ✅ | ❌ | can relay ICS-27 Interchain account packets -| Packet_Recv_A | ✅ | ✅ | -| Packet_Recv_P | ✅ | ✅ | -| Packet_Timeout_A | ✅ | ✅ | -| Packet_Timeout_P | ✅ | ✅ | -| Packet_TimeoutClose_A | ✅ | ❓ | -| 
Packet_TimeoutClose_P | ✅ | ❓ | -| Packet_Optimistic | ❌ | ❓ | relay packets over non-Open channels -| | | | -| Cl_Non_Tendermint | ❌ | ❌ | supports non tendermint IBC light clients -| Chain_Non_Cosmos | ❌ | ❌ | supports non cosmos-SDK chains -| | | | -| Mgmt_Static | ✅ | ✅ | provides means for configuration prior to being started -| Mgmt_Dynamic | ❌ | ❌ | provides means for configuration and monitoring during runtime - - -[cosmos-go-relayer]: https://github.com/cosmos/relayer diff --git a/guide/src/getting_started.md b/guide/src/getting_started.md deleted file mode 100644 index 0c1f747fbd..0000000000 --- a/guide/src/getting_started.md +++ /dev/null @@ -1,13 +0,0 @@ -# Getting Started - -In order to run Hermes, please make sure you have all the -[pre-requisites](./pre_requisites.md) installed on your machine. - -Once you have these pre-requisites, you can -[build and run Hermes](./installation.md). - -> The instructions in this guide have been tested on `Linux` and `MacOS` -> environments. Most of the commands should work on both environments. Even -> though you can build and run the relayer on `Windows` (since we develop it -> in Rust and it supports cross platform compilation) we have not tested the -> relayer on `Windows` and we do not support this operating system at this time. \ No newline at end of file diff --git a/guide/src/glossary.md b/guide/src/glossary.md deleted file mode 100644 index 89f406494b..0000000000 --- a/guide/src/glossary.md +++ /dev/null @@ -1,14 +0,0 @@ -# Glossary - -These are some of the definitions used in this guide: - -| Term | Definition | -|------|------------| -|IBC transaction| A transaction that includes IBC datagrams (including packets). This is constructed by the relayer and sent over the physical network to a chain according to the chain rules. 
For example, for tendermint chains a broadcast_tx_commit request is sent to a tendermint RPC server.| -|IBC datagram| An element of the transaction payload sent by the relayer; it includes client, connection, channel and IBC packet data. Multiple IBC datagrams may be included in an IBC transaction.| -|IBC packet| A particular type of IBC datagram that includes the application packet and its commitment proof.| -|IBC Client| Client code running on chain, typically only the light client verification related functionality.| -|Relayer Light Client| Full light client functionality, including connecting to at least one provider (full node), storing and verifying headers, etc.| -|Source chain| The chain from which the relayer reads data to fill an IBC datagram.| -|Destination chain| The chain where the relayer submits transactions that include the IBC datagram.| - diff --git a/guide/src/help.md b/guide/src/help.md deleted file mode 100644 index e2e7bfcf45..0000000000 --- a/guide/src/help.md +++ /dev/null @@ -1,586 +0,0 @@ -# Help - -This section provides guidelines regarding troubleshooting and general -resources for getting help with `hermes`. -For this purpose, we recommend a few ideas that could be of help: - -- [hermes help][help] command, providing a CLI - documentation for all `hermes` commands. 
-- [profile][profiling] your relayer binary to identify slow methods; -- [configure][log-level] the `log_level` to help with debugging; -- [patch][patching] your local gaia chain(s) to enable some corner-case methods - (e.g., channel close); - -And if the above options do not address your specific problem: -- you can [request a new feature][feature]; -- or consult the [list of reported issues][issues] and search by relevant - keywords to see if you're dealing with a known problem; -- we would be grateful if you can submit a [bug report][bug-report] - discussing any problem you find, and from there on we can look at the - problem together; - -Lastly, for general questions, you can reach us at `hello@informal.systems`, -or on Twitter [@informalinc][twitter]. - -## Table of contents - - - -## Help command - -The CLI comprises a special `help` command, which accepts as parameter other commands, and provides guidance on what is the correct way to invoke those commands. - -For instance, - -```shell -hermes help create -``` - -will provide details about all the valid invocations of the `create` CLI command. 
- -``` -USAGE: - hermes create - -DESCRIPTION: - Create objects (client, connection, or channel) on chains - -SUBCOMMANDS: - help Get usage information - client Create a new IBC client - connection Create a new connection between two chains - channel Create a new channel between two chains -``` - -This can provide further specific guidance if we add additional parameters, e.g., - -```shell -hermes help create channel -``` - -``` -USAGE: - hermes create channel - -DESCRIPTION: - Create a new channel between two chains - -POSITIONAL ARGUMENTS: - chain_a_id identifier of the side `a` chain for the new channel - chain_b_id identifier of the side `b` chain for the new channel (optional) - -FLAGS: - -c, --connection-a CONNECTION-A - --port-a PORT-A identifier of the side `a` port for the new channel - --port-b PORT-B identifier of the side `b` port for the new channel - -o, --order ORDER the channel ordering, valid options 'unordered' (default) and 'ordered' - -v, --channel-version VERSION the version for the new channel -``` - -Additionally, the `-h`/`--help` flags typical for CLI applications work on -all commands. - -## Parametrizing the log output level - -The relayer configuration file permits parametrization of output verbosity via the knob called `log_level`. -This file is loaded by default from `$HOME/.hermes/config.toml`, but can be overridden in all commands -with the `-c` flag, eg. `hermes -c ./path/to/my/config.toml some command`. - -Relevant snippet: - -```toml -[global] -log_level = 'error' -``` - -Valid options for `log_level` are: 'error', 'warn', 'info', 'debug', 'trace'. -These levels correspond to the tracing sub-component of the relayer-cli, -[see here](https://docs.rs/tracing-core/0.1.17/tracing_core/struct.Level.html). - -The relayer will _always_ print a last line summarizing the result of its -operation for queries or transactions. In addition to this last line, -arbitrary debug, info, or other outputs may be produced. 
- -## Overriding the tracing filter using `RUST_LOG` - -For debugging purposes, we may want to inspect which RPC queries the relayer is making. -The relayer makes use of the `tendermint-rpc` library to issue RPC queries, but -the output of this library is by default turned off in order to keep the logs more -readable. - -Using the `RUST_LOG` environment variable, we can turn logging on for the -`tendermint-rpc` library, as follows: - -``` -RUST_LOG=tendermint-rpc=debug,info hermes start -``` - -Setting the `RUST_LOG` environment variable to `tendermint_rpc=debug,info` instructs -the relayer to set the log level of the `tendermint_rpc` crate to `debug` and otherwise -use the `info` log level. - -> **Note:** While the `tendermint-rpc` contains a dash in its name, the logging filter -> expects a module name, which can only contain alphanumeric characters and underscores, -> hence why the filter above is written `tendermint_rpc=debug`. - -**Example:** - -``` -❯ RUST_LOG=tendermint_rpc=debug,info hermes start -2022-02-24T14:32:14.039555Z INFO ThreadId(01) using default configuration from '/Users/coromac/.hermes/config.toml' -2022-02-24T14:32:14.043500Z INFO ThreadId(01) telemetry service running, exposing metrics at http://127.0.0.1:3001/metrics -2022-02-24T14:32:14.043542Z INFO ThreadId(01) [rest] address not configured, REST server disabled -2022-02-24T14:32:14.049759Z DEBUG ThreadId(01) Incoming response: { - "jsonrpc": "2.0", - "id": "143b4580-c49e-47c1-81b2-4e7090f6e762", - "result": { - "node_info": { - "protocol_version": { - "p2p": "8", - "block": "11", - "app": "0" - }, - "id": "73f9134539f9845cd253dc302e36d48ee4c0f32d", - "listen_addr": "tcp://0.0.0.0:27003", - "network": "ibc0", - "version": "v0.34.14", - "channels": "40202122233038606100", - "moniker": "ibc0", - "other": { - "tx_index": "on", - "rpc_address": "tcp://0.0.0.0:27000" - } - }, - "sync_info": { - "latest_block_hash": "8396B93E355AD80EED8167A04BB9858A315A8BEB482547DE16A6CD82BC11551B", - 
"latest_app_hash": "22419E041D6997EE75FF66F7F537A3D36122B220EAB89A9C246FEF680FB1C97A", - "latest_block_height": "86392", - "latest_block_time": "2022-02-24T14:32:08.673989Z", - "earliest_block_hash": "0A73CFE8566D4D4FBFE3178D9BCBAD483FD689854CA8012FF1457F8EC4598132", - "earliest_app_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", - "earliest_block_height": "1", - "earliest_block_time": "2022-01-20T09:04:21.549736Z", - "catching_up": false - }, - "validator_info": { - "address": "6FD56E6AA1EEDAD227AFAB6B9DE631719D4A3691", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "mR5V/QWOv/mJYyNmlsl3mfxKy1PNaOzdztyas4NF2BA=" - }, - "voting_power": "10" - } - } -} -2022-02-24T14:32:14.052503Z DEBUG ThreadId(21) Incoming response: { - "jsonrpc": "2.0", - "id": "0ca35e64-ea98-4fbf-bd66-c3291128ace9", - "result": {} -} - -... -``` - -The two DEBUG log lines above were emitted by the `tendermint-rpc` crate. - -## Inspecting the relayer state - -To get a little bit of insight into the state of the relayer, -Hermes will react to a `SIGUSR1` signal by dumping its state to -the console, either in plain text form or as a JSON object if Hermes -was started with the `--json` option. - -To send a `SIGUSR1` signal to Hermes, look up its process ID (below PID) -and use the following command: - -```shell -kill -SIGUSR1 PID -``` - -Hermes will print some information about the workers which are currently running. 
- -For example, with three chains configured and one channel between each pair of chains: - -```text -INFO Dumping state (triggered by SIGUSR1) -INFO -INFO * Chains: ibc-0, ibc-1, ibc-2 -INFO * Client workers: -INFO - client::ibc-0->ibc-1:07-tendermint-0 (id: 5) -INFO - client::ibc-0->ibc-2:07-tendermint-0 (id: 9) -INFO - client::ibc-1->ibc-0:07-tendermint-0 (id: 1) -INFO - client::ibc-1->ibc-2:07-tendermint-1 (id: 11) -INFO - client::ibc-2->ibc-0:07-tendermint-1 (id: 3) -INFO - client::ibc-2->ibc-1:07-tendermint-1 (id: 7) -INFO * Packet workers: -INFO - packet::channel-0/transfer:ibc-0->ibc-1 (id: 2) -INFO - packet::channel-0/transfer:ibc-1->ibc-0 (id: 6) -INFO - packet::channel-0/transfer:ibc-2->ibc-0 (id: 10) -INFO - packet::channel-1/transfer:ibc-0->ibc-2 (id: 4) -INFO - packet::channel-1/transfer:ibc-1->ibc-2 (id: 8) -INFO - packet::channel-1/transfer:ibc-2->ibc-1 (id: 12) -``` - -or in JSON form (prettified): - -```json -{ - "timestamp": "Jul 12 17:04:37.244", - "level": "INFO", - "fields": { - "message": "Dumping state (triggered by SIGUSR1)" - } -} -{ - "chains": [ - "ibc-0", - "ibc-1", - "ibc-2" - ], - "workers": { - "Client": [ - { - "id": 5, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 9, - "object": { - "type": "Client", - "dst_chain_id": "ibc-2", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 1, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-1" - } - }, - { - "id": 11, - "object": { - "type": "Client", - "dst_chain_id": "ibc-2", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-1" - } - }, - { - "id": 3, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-2" - } - }, - { - "id": 7, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": 
"07-tendermint-1", - "src_chain_id": "ibc-2" - } - } - ], - "Packet": [ - { - "id": 2, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-1", - "src_chain_id": "ibc-0", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 6, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-0", - "src_chain_id": "ibc-1", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 10, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-0", - "src_chain_id": "ibc-2", - "src_channel_id": "channel-0", - "src_port_id": "transfer" - } - }, - { - "id": 4, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-2", - "src_chain_id": "ibc-0", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - }, - { - "id": 8, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-2", - "src_chain_id": "ibc-1", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - }, - { - "id": 12, - "object": { - "type": "Packet", - "dst_chain_id": "ibc-1", - "src_chain_id": "ibc-2", - "src_channel_id": "channel-1", - "src_port_id": "transfer" - } - } - ] - } -} -``` - -## Patching `gaia` to support `ChanCloseInit` - -The guide below refers specifically to patching your gaia chain so that the -relayer can initiate the closing of channels by submitting a [`ChanCloseInit`][chan-close] message. -Without this modification, the transaction will be rejected. -We also describe how to test the channel closing feature. 
-
-- Clone the Cosmos SDK
-
-    ```shell
-    git clone https://github.com/cosmos/cosmos-sdk.git ~/go/src/github.com/cosmos/cosmos-sdk
-    cd ~/go/src/github.com/cosmos/cosmos-sdk
-    ```
-
-- Apply these diffs:
-
-    ```
-    --- a/x/ibc/applications/transfer/module.go
-    +++ b/x/ibc/applications/transfer/module.go
-    @@ -305,7 +305,7 @@ func (am AppModule) OnChanCloseInit(
-        channelID string,
-    ) error {
-        // Disallow user-initiated channel closing for transfer channels
-    -    return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel")
-    +    return nil
-    }
-    ```
-
-- Append the line below (watch for the placeholder `<username>`) as the last line
-  in your `go.mod` in the gaia clone:
-
-```replace github.com/cosmos/cosmos-sdk => /Users/<username>/go/src/github.com/cosmos/cosmos-sdk```
-
-- Now `make build` and `make install` your local copy of gaia
-
-In order to test the correct operation during the channel close, perform the steps below.
-
-- the channel should be in state open-open:
-
-- transfer of 5555 samoleans from `ibc-1` to `ibc-0`. This results in a
-  Tx to `ibc-1` for a `MsgTransfer` packet.
-  Make sure you're not relaying this packet (the relayer should not be running on
-  this path).
- - ```shell - hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 5555 -o 1000 -n 1 -d samoleans - ``` - -- now do the first step of channel closing: the channel will transition -to close-open: - - ```shell - hermes -c config.toml tx raw chan-close-init ibc-0 ibc-1 connection-0 transfer transfer channel-0 channel-1 - ``` - -- trigger timeout on close to ibc-1 - - ```shell - hermes -c config.toml tx raw packet-recv ibc-0 ibc-1 transfer channel-1 - ``` - -- close-close - - ```shell - hermes -c config.toml tx raw chan-close-confirm ibc-1 ibc-0 connection-1 transfer transfer channel-1 channel-0 - ``` - -- verify that the two ends are in Close state: - - ```shell - hermes -c config.toml query channel end ibc-0 transfer channel-0 - hermes -c config.toml query channel end ibc-1 transfer channel-1 - ``` - - -## New Feature Request - -If you would like a feature to be added to `hermes`, don't hesitate -to open a discussion about that via the [feature request][feature-request] -issue template. - -> Note that Hermes is packaged as part of the `ibc-relayer-cli` crate. - - -## Profiling - -The `relayer` crate provides a `time!` macro which can be used to measure how much time is spent between the invocation of the macro and the end of the enclosing scope. - -### Setup - -The `time!` macro has no effect unless the `profiling` feature of the `relayer` crate is enabled. - -To enable it, one must compile the `relayer-cli` crate with the `--features=profiling` flag. - -a) One way is to build the `relayer` binary and update the `hermes` alias to point to the executable: - -```shell -cd relayer-cli/ -cargo build --features=profiling -``` - -b) Alternatively, one can use the `cargo run` command and update the alias accordingly: - -```shell -alias hermes='cargo run --features=profiling --manifest-path=relayer-cli/Cargo.toml --' -``` - -The `--manifest-path=relayer-cli/Cargo.toml` flag is needed for `cargo run` to accept the `--features` flag. 
-
-### Example
-
-```rust
-fn my_function(x: u32) -> u32 {
-    time!("myfunction: x={}", x); // A
-
-    std::thread::sleep(Duration::from_secs(1));
-
-    {
-        time!("inner operation"); // B
-
-        std::thread::sleep(Duration::from_secs(2));
-
-        // timer B ends here
-    }
-
-    x + 1
-
-    // timer A ends here
-}
-```
-
-#### Output
-
-```
-Jan 20 11:28:46.841 INFO relayer::macros::profiling: ⏳ myfunction: x=42 - start
-Jan 20 11:28:47.842 INFO relayer::macros::profiling: ⏳ inner operation - start
-Jan 20 11:28:49.846 INFO relayer::macros::profiling: ⏳ inner operation - elapsed: 2004ms
-Jan 20 11:28:49.847 INFO relayer::macros::profiling: ⏳ myfunction: x=42 - elapsed: 3005ms
-```
-
-Profiling is useful for tracking down unusually slow methods.
-Each transaction or query usually consists of multiple lower-level methods,
-and it's often not clear which of these are the culprit for low performance.
-With profiling enabled, `hermes` will output timing information for individual
-methods involved in a command.
-
-__NOTE__: To be able to see the profiling output, the relayer needs to be compiled with
-the `profiling` feature and the [log level][log-level] should be `info` level or lower.
- -#### Example output for `tx raw conn-init` command - -``` -hermes -c config.toml tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-0 -``` - -``` -Apr 13 20:58:21.225 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - start -Apr 13 20:58:21.230 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - elapsed: 4ms -Apr 13 20:58:21.230 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - start -Apr 13 20:58:21.235 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - elapsed: 5ms -Apr 13 20:58:21.235 INFO ibc_relayer::event::monitor: running listener chain.id=ibc-1 -Apr 13 20:58:21.236 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - start -Apr 13 20:58:21.239 INFO ibc_relayer::macros::profiling: ⏳ init_light_client - elapsed: 2ms -Apr 13 20:58:21.239 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - start -Apr 13 20:58:21.244 INFO ibc_relayer::macros::profiling: ⏳ init_event_monitor - elapsed: 4ms -Apr 13 20:58:21.244 INFO ibc_relayer::event::monitor: running listener chain.id=ibc-0 -Apr 13 20:58:21.244 INFO ibc_relayer::macros::profiling: ⏳ get_signer - start -Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ get_signer - elapsed: 1ms -Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ query_latest_height - start -Apr 13 20:58:21.246 INFO ibc_relayer::macros::profiling: ⏳ block_on - start -Apr 13 20:58:21.248 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 1ms -Apr 13 20:58:21.249 INFO ibc_relayer::macros::profiling: ⏳ query_latest_height - elapsed: 3ms -Apr 13 20:58:21.250 INFO ibc_relayer::macros::profiling: ⏳ unbonding_period - start -Apr 13 20:58:21.250 INFO ibc_relayer::macros::profiling: ⏳ block_on - start -Apr 13 20:58:21.251 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 0ms -Apr 13 20:58:21.270 INFO ibc_relayer::macros::profiling: ⏳ block_on - start -Apr 13 20:58:21.273 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 2ms -Apr 13 20:58:21.273 INFO 
ibc_relayer::macros::profiling: ⏳ unbonding_period - elapsed: 23ms -Apr 13 20:58:21.279 INFO ibc_relayer::macros::profiling: ⏳ build_consensus_state - start -Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ build_consensus_state - elapsed: 0ms -Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ send_msgs - start -Apr 13 20:58:21.280 INFO ibc_relayer::macros::profiling: ⏳ send_tx - start -Apr 13 20:58:21.282 INFO ibc_relayer::macros::profiling: ⏳ PK "03f17d2c094ee68cfcedb2c2f2b7dec6cd82ea158ac1c32d3de0ca8b288a3c8bfa" - start -Apr 13 20:58:21.282 INFO ibc_relayer::macros::profiling: ⏳ block_on - start -Apr 13 20:58:21.285 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 3ms -Apr 13 20:58:21.296 INFO ibc_relayer::macros::profiling: ⏳ block_on - start -Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ block_on - elapsed: 1367ms -Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ PK "03f17d2c094ee68cfcedb2c2f2b7dec6cd82ea158ac1c32d3de0ca8b288a3c8bfa" - elapsed: 1382ms -Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ send_tx - elapsed: 1384ms -Apr 13 20:58:22.664 INFO ibc_relayer::macros::profiling: ⏳ send_msgs - elapsed: 1384ms -Success: CreateClient( - CreateClient( - Attributes { - height: Height { - revision: 0, - height: 10675, - }, - client_id: ClientId( - "07-tendermint-7", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 1, - height: 10663, - }, - }, - ), -) -``` - - - -[help]: ./help.md#help-command -[feature-request]: https://github.com/informalsystems/ibc-rs/issues/new?assignees=&labels=&template=feature-request.md -[bug-report]: https://github.com/informalsystems/ibc-rs/issues/new?assignees=&labels=&template=bug-report.md -[twitter]: https://twitter.com/informalinc -[twitter-image]: https://abs.twimg.com/errors/logo23x19.png -[website]: https://informal.systems -[log-level]: ./help.md#parametrizing-the-log-output-level -[issues]: https://github.com/informalsystems/ibc-rs/issues 
-
-[profiling]: ./help.md#profiling
-[feature]: ./help.md#new-feature-request
-[patching]: ./help.md#patching-gaia
-[chan-close]: ./commands/raw/channel-close.md#channel-close-init
diff --git a/guide/src/images/grafana.png b/guide/src/images/grafana.png
deleted file mode 100644
index 5d2882ec52..0000000000
Binary files a/guide/src/images/grafana.png and /dev/null differ
diff --git a/guide/src/index.md b/guide/src/index.md
deleted file mode 100644
index 52806e83ee..0000000000
--- a/guide/src/index.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Hermes Guide (v0.15.0)
-
-
-Hermes is an open-source Rust implementation of a relayer for the
-[Inter-Blockchain Communication protocol](https://ibcprotocol.org) (IBC).
-
-This guide can help you setup, configure, and operate Hermes to transfer
-packets between two or more IBC-enabled chains.
-
-## Sections
-
-**[What is Hermes](./relayer.md)**
-
-- Explains what Hermes is about.
-
-**[Features](./features.md)**
-
-- This section discusses what features to expect from Hermes, as well as a
-  comparison between the Cosmos Go relayer and Hermes.
-
-**[Getting Started](./getting_started.md)**
-
-- The getting started section can help you setup, configure, and run Hermes.
-
-**[Tutorials](./tutorials/index.md)**
-
-- This section provides some tutorials on how to operate and test Hermes.
-
-**[Commands Reference](./commands/index.md)**
-
-- The commands let you interact with Hermes using its command line interface.
-
-**[Help](./help.md)**
-
-- This part provides guidelines regarding troubleshooting and general resources
-  for getting help.
-
-**[Glossary](./glossary.md)**
-
-- This section provides some definitions of terms used throughout the guide
-
----
-
-**Other References and Useful Links:**
-
-* [Hermes Github repository](https://github.com/informalsystems/ibc-rs)
-  — The official Github repository for Hermes.
-* [IBC Github repository](https://github.com/cosmos/ics) - - The official repository for the Inter-blockchain protocol (IBC). - -## Disclaimer - -This project is undergoing heavy development, use at your own risk. diff --git a/guide/src/installation.md b/guide/src/installation.md deleted file mode 100644 index 5a9bfe166f..0000000000 --- a/guide/src/installation.md +++ /dev/null @@ -1,223 +0,0 @@ -# Install the relayer - -There are two main approaches for obtaining Hermes: - -1. Installation: - 1. If you are running on a Unix machine (Linux/MacOS), then the simplest - option is to [download the latest binary](#install-by-downloading). - 2. You can also install via [Cargo](#install-via-cargo). - -2. Alternatively, [build Hermes directly from source](#build-from-source). - - -## Install by downloading - -Simply head to the GitHub [Releases][releases] page and download the latest -version of Hermes binary matching your platform: -- MacOS: `hermes-v0.15.0-x86_64-apple-darwin.tar.gz` (or .zip), -- Linux: `hermes-v0.15.0-x86_64-unknown-linux-gnu.tar.gz` (or .zip). - -The step-by-step instruction below should carry you through the whole process: - -1. Make the directory where we'll place the binary: - ```shell - mkdir -p $HOME/.hermes/bin - ``` - -2. Extract the binary archive: - ```shell - tar -C $HOME/.hermes/bin/ -vxzf $ARCHIVE_NAME - ``` - -3. Update your path, by adding this line in your `.bashrc` or `.zshrc` shell - configuration file: - ```shell - export PATH="$HOME/.hermes/bin:$PATH" - ``` - -> NOTE: The binary may be initially prevented from running if you're -> on MacOS. -> See the ["Open Anyway" instructions from this support forum][developer-app] -> if that is the case. - -You should now be able to run Hermes by invoking the `hermes` executable. - -```shell -hermes version -``` - -``` -hermes 0.15.0 -``` - -## Install via Cargo - -> NOTE: This approach assumes you have installed all -> the [pre-requisites](./pre_requisites.md) on your machine. 
- -Hermes is packaged in the `ibc-relayer-cli` Rust crate. -To install the latest release of Hermes, run the following command in a terminal: - -```shell -cargo install ibc-relayer-cli --bin hermes --locked -``` - -This will download and build the crate `ibc-relayer-cli`, and install the -`hermes` binary in `$HOME/.cargo/bin`. - -> If you have not installed Rust and Cargo via [rustup.rs](https://rustup.rs), you may need to -> add the `$HOME/.cargo/bin` directory to your `PATH` environment variable. -> For most shells, this can be done by adding the following line to your -> `.bashrc` or `.zshrc` configuration file: -> -> ```shell -> export PATH="$HOME/.cargo/bin:$PATH" -> ``` - -You should now be able to run Hermes by invoking the `hermes` executable. - -```shell -hermes version -``` - -``` -hermes 0.15.0 -``` - -## Build from source - -### Clone the repository - -Open a terminal and clone the `ibc-rs` repository: - -```shell -git clone https://github.com/informalsystems/ibc-rs.git -``` - -Change to the repository directory -```shell -cd ibc-rs -``` - -### Checkout the latest release - -Go to the [ibc-rs releases](https://github.com/informalsystems/ibc-rs/releases) page to see what is the most recent release. - -Then checkout the release, for example if the most recent release is `v0.15.0` then execute the command: - -```shell -git checkout v0.15.0 -``` - -### Building with `cargo build` - -This command builds all the crates from the [__`ibc-rs`__](https://github.com/informalsystems/ibc-rs) repository, namely: the [__`ibc`__](https://github.com/informalsystems/ibc-rs/tree/master/modules) modules crate, [__`ibc-relayer`__](https://github.com/informalsystems/ibc-rs/tree/master/relayer) crate, [__`ibc-proto`__](https://github.com/informalsystems/ibc-rs/tree/master/proto) crate, and the [__`ibc-relayer-cli`__](https://github.com/informalsystems/ibc-rs/tree/master/relayer-cli) crate. -The last of these crates contains the `hermes` binary. 
- -```shell -cargo build --release --bin hermes -``` - -
- -> By default, Hermes bundles a [telemetry service and server](./telemetry.md). -> To build Hermes without telemetry support, and get a smaller executable, -> supply the `--no-default-features flag` to `cargo build`: -> -> ```shell -> cargo build --release --no-default-features --bin hermes -> ``` - -If the build is successful, the `hermes` executable will be located in the following location: - -```shell -./target/release/hermes -``` - -__Troubleshooting__: -In case the `cargo build` command above fails, as a first course of action we -recommend trying to run the same command with the additional `locked` flag: - -```shell -cargo build --release --bin hermes --locked -``` - -### Running for the first time - -If you run the `hermes` without any additional parameters you should see the usage and help information: - -```shell -./target/release/hermes -``` - -``` -hermes 0.15.0 -Informal Systems - -USAGE: - hermes - -SUBCOMMANDS: - help Get usage information - config Validate Hermes configuration file - keys Manage keys in the relayer for each chain - create Create objects (client, connection, or channel) on chains - update Update objects (clients) on chains - upgrade Upgrade objects (clients) after chain upgrade - start Start the relayer - query Query objects from the chain - tx Create and send IBC transactions - listen Listen to and display IBC events emitted by a chain - misbehaviour Listen to client update IBC events and handles misbehaviour - version Display version information -``` - -### Creating an alias for the executable - -It might be easier to create an alias for `hermes` so you can just run it by specifying the executable name instead of the whole path. In order to create an alias execute the following command: - -```shell -alias hermes='cargo run --release --bin hermes --' -``` - -## Shell auto-completions - -The `completions` subcommand of Hermes can be used to output a completion script -for a choice of widely used command-line shells. 
-Refer to `hermes completions --help` for the list. Some shell-specific examples -of setting up auto-completion with this command are provided below; check your -shell configuration to decide on the suitable directory in which to install the script -and any further necessary modifications to the shell's startup files. - -### Bash - -```sh -hermes completions bash > ~/.local/share/bash-completion/completions/hermes -``` - -On a MacOS installation with Homebrew `bash-completion` formula installed, use - -```sh -hermes completions bash > $(brew --prefix)/etc/bash_completion.d/hermes.bash-completion -``` - -### Zsh - -```sh -hermes completions zsh > ~/.zfunc/_hermes -``` - -To make the shell load the script on initialization, add the directory to `fpath` -in your `~/.zshrc` before `compinit`: - -``` -fpath+=~/.zfunc -``` - -## Next Steps - -Go to the [`Configuration`](./config.md) section to learn how to create a configuration file to be used by Hermes. - - -[releases]: https://github.com/informalsystems/ibc-rs/releases -[developer-app]: https://support.apple.com/en-gb/HT202491 diff --git a/guide/src/pre_requisites.md b/guide/src/pre_requisites.md deleted file mode 100644 index 02d06077a3..0000000000 --- a/guide/src/pre_requisites.md +++ /dev/null @@ -1,43 +0,0 @@ -# Pre-requisites - -## 1. Rust - -The IBC Relayer is developed with the [Rust](https://www.rust-lang.org) programming language. In order to build and run the relayer you need to install and configure `Rust` on your machine. - -### Fresh Rust installation - -For instructions on how to install `Rust` on your machine please follow the official [`Notes about Rust Installation`](https://www.rust-lang.org/tools/install). - -The provided instructions will install all the Rust toolchain including `rustc`, `cargo`, and `rustup` that are required to build the project. - -### Version requirements - -Hermes is developed and tested using the latest version of Rust, `1.60` at -the moment. 
To check that your toolchain is up-to-date run: - -```shell -rustc --version -``` - -In case you already had installed the Rust toolchain in the past, you can -update your installation by running `rustup update`. - -### Testing the installation - -After you install the `Rust` toolchain you can execute the following command: - -```shell -cargo version -``` - -This should display the `cargo` version and confirm the proper installation. - -## 2. Golang - -You will also need the __Go__ programming language installed and configured on your machine. This is a requirement for the the section [Installing Gaia](./tutorials/local-chains/gaia.md) in the [Two Local Chains](./tutorials/local-chains/index.md) tutorial. - -To install and configure Golang on your machine please follow the [Golang official documentation](https://golang.org/doc/install). - -## Next Steps - -Next, go to the [Installation](./installation.md) section to learn how to build Hermes. diff --git a/guide/src/relayer.md b/guide/src/relayer.md deleted file mode 100644 index 0c3f813561..0000000000 --- a/guide/src/relayer.md +++ /dev/null @@ -1,25 +0,0 @@ -# What is Hermes? - -Hermes is an open-source Rust implementation of a relayer for the -[Inter-Blockchain Communication protocol](https://ibc.cosmos.network) (IBC), -released under the [ibc-relayer-cli](https://crates.io/crates/ibc-relayer-cli) crate. - -The **Inter-Blockchain Communication protocol** is an end-to-end, connection-oriented, -stateful protocol for reliable, ordered, and authenticated communication between modules -on separate distributed ledgers. [^ibc] - -An IBC **relayer** is an off-chain process responsible for relaying IBC datagrams between any two chains. -The way it does so is by scanning chain states, building transactions based on these states, -and submitting the transactions to the chains involved in the network. - -The relayer is a central element in the IBC network architecture. 
This is because chain modules -in this architecture are not directly sending messages to each other over networking infrastructure, -but instead they create and store the data to be retrieved and used by a relayer to build the IBC datagrams. - -We sometimes refer to Hermes as "IBC Relayer CLI", to make it clear that this -is a relayer CLI (i.e., a binary) and distinguish it from the relayer core library -(that is the crate called [`ibc-relayer`](https://crates.io/crates/ibc-relayer)). - -Hermes is actively developed and maintained by [Informal Systems](https://informal.systems) in the [ibc-rs](https://github.com/informalsystems/ibc-rs) repository. - -[^ibc]: [The Interblockchain Communication Protocol: An Overview](https://arxiv.org/pdf/2006.15918.pdf) diff --git a/guide/src/rest-api.md b/guide/src/rest-api.md deleted file mode 100644 index 3593b1f2ae..0000000000 --- a/guide/src/rest-api.md +++ /dev/null @@ -1,177 +0,0 @@ -# REST API - -*Since version 0.7.0.* - -Hermes features a built-in HTTP server which exposes information -about the relayer configuration and state via a REST API. - -## Table of Contents - - - -## Configuration - -The REST API is not active by default, and must be enabled in the relayer configuration: - -```toml -[rest] -enabled = true -host = '127.0.0.1' -port = 3000 -``` - -Please see the [relevant section in the *Configuration* page](./config.md#rest) for details about the configuration options. - -## Endpoints - -### GET `/version` - -This endpoint returns the version of the Hermes (under the `ibc-relayer` key) as well -as the version of the REST server itself (under the `ibc-relayer-rest` key). - -**Example** - -``` -❯ curl -s -X GET 'http://127.0.0.1:3000/version' | jq -``` - -```json -[ - { - "name": "ibc-relayer", - "version": "0.15.0" - }, - { - "name": "ibc-relayer-rest", - "version": "0.1.0" - } -] -``` - -### GET `/chains` - -This endpoint return the identifiers of the chains that Hermes is connected to. 
-Those identifiers can be used with the `/chain/:id` endpoint to gather more -information about each chain's configuration. See the next section for more details. - -**Example** - -``` -❯ curl -s -X GET 'http://127.0.0.1:3000/chains' | jq -``` - -```json -{ - "status": "success", - "result": [ - "ibc-0", - "ibc-1" - ] -} -``` - -### GET `/chain/:id` - -This endpoint returns the configuration of the chain with the given identifier, -where `:id` stands for the identififer. - -**Example** - -``` -❯ curl -s -X GET 'http://127.0.0.1:3000/chain/ibc-0' | jq -``` - -```json -{ - "status": "success", - "result": { - "id": "ibc-0", - "rpc_addr": "http://127.0.0.1:26657/", - "websocket_addr": "ws://127.0.0.1:26657/websocket", - "grpc_addr": "http://127.0.0.1:9090/", - "rpc_timeout": "10s", - "account_prefix": "cosmos", - "key_name": "testkey", - "store_prefix": "ibc", - "max_gas": 900000000, - "gas_adjustment": null, - "max_msg_num": 60, - "max_tx_size": 2097152, - "clock_drift": "5s", - "trusting_period": "14days", - "trust_threshold": { - "numerator": "1", - "denominator": "3" - }, - "gas_price": { - "price": 0.001, - "denom": "stake" - }, - "packet_filter": { - "policy": "allowall" - } - } -} -``` - -### GET `/state` - -This endpoint returns the current state of the relayer, -namely which chains it is connected to, as well as a description -of all the workers which are currently active. 
- -``` -❯ curl -s -X GET 'http://127.0.0.1:3000/state' | jq -``` - -```json -{ - "status": "success", - "result": { - "chains": [ - "ibc-0", - "ibc-1" - ], - "workers": { - "Client": [ - { - "id": 3, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-0" - } - }, - { - "id": 4, - "object": { - "type": "Client", - "dst_chain_id": "ibc-1", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-0" - } - }, - { - "id": 1, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-0", - "src_chain_id": "ibc-1" - } - }, - { - "id": 2, - "object": { - "type": "Client", - "dst_chain_id": "ibc-0", - "dst_client_id": "07-tendermint-1", - "src_chain_id": "ibc-1" - } - } - ] - } - } -} -``` diff --git a/guide/src/telemetry.md b/guide/src/telemetry.md deleted file mode 100644 index fcc7ca7deb..0000000000 --- a/guide/src/telemetry.md +++ /dev/null @@ -1,190 +0,0 @@ -# Telemetry - -*Since version 0.4.0.* - -To gain a better understanding of the status and activity of the relayer, -Hermes features a built-in telemetry service based on the [OpenTelemetry][opentelemetry] observability framework, -whose metrics can be exposed over HTTP for integration with the [Prometheus][prometheus] monitoring system. - -The official Hermes builds for Linux and macOS come with telemetry support since version 0.4.0, -and can be [downloaded directly from the GitHub Releases][gh-releases] page. - -[gh-releases]: https://github.com/informalsystems/ibc-rs/releases -[opentelemetry]: https://opentelemetry.io -[prometheus]: https://prometheus.io - -## Configuration - -The telemetry service is not active by default, and must be enabled in the relayer configuration: - -```toml -[telemetry] -enabled = true -host = '127.0.0.1' -port = 3001 -``` - -Please see the [relevant section in the *Configuration* page](./config.md#telemetry) for details about the configuration options. 
- -## Metrics - -The following table describes the metrics currently tracked by the telemetry service: - -| Name | Description | OpenTelemetry type | -| ---------------------------- | ---------------------------------------------------- | ------------------- | -| `workers` | Number of workers per object | `i64` UpDownCounter | -| `ibc_client_updates` | Number of client updates performed per client | `u64` Counter | -| `ibc_client_misbehaviours` | Number of misbehaviours detected per client | `u64` Counter | -| `ibc_receive_packets` | Number of receive packets relayed per channel | `u64` Counter | -| `ibc_acknowledgment_packets` | Number of acknowledgment packets relayed per channel | `u64` Counter | -| `ibc_timeout_packets` | Number of timeout packets relayed per channel | `u64` Counter | -| `wallet_balance` | How much balance (coins) there is left in each wallet key that Hermes is using. | `u64` ValueRecorder | -| `ws_events` | How many IBC events did Hermes receive via the websocket subscription, in total since starting up, per chain. | Counter | -| `ws_reconnect` | Number of times Hermes had to reconnect to the WebSocket endpoint | Counter | -| `tx_latency_submitted` | Latency for all transactions submitted to a chain (i.e., difference between the moment when Hermes received an event until the corresponding transaction(s) were submitted). | `u64` ValueRecorder | -| `tx_latency_confirmed` | Latency for all transactions confirmed by a chain (i.e., difference between the moment when Hermes received an event until the corresponding transaction(s) were confirmed). Requires `tx_confirmation = true`. | `u64` ValueRecorder | -| `msg_num` | How many messages Hermes submitted to a specific chain. | `u64` Counter | - -## Integration with Prometheus - -With the settings , the telemetry service will be enabled and will serve the metrics using -the Prometheus encoder over HTTP at [`http://localhost:3001/metrics`](http://localhost:3001/metrics). 
- -After starting Hermes with `hermes start`, and letting it run for a while to relay packets, -open [`http://localhost:3001/metrics`](http://localhost:3001/metrics) in a browser, you should -see Prometheus-encoded metrics. - -For example, with two channels and after transferring some tokens between the chains: - -```text -# HELP cache_hits Number of cache hits for queries emitted by the relayer, per chain and query type -# TYPE cache_hits counter -cache_hits{chain="ibc-0",query_type="query_channel"} 276 -cache_hits{chain="ibc-0",query_type="query_client_state"} 177 -cache_hits{chain="ibc-0",query_type="query_connection"} 160 -cache_hits{chain="ibc-1",query_type="query_channel"} 240 -cache_hits{chain="ibc-1",query_type="query_client_state"} 173 -cache_hits{chain="ibc-1",query_type="query_connection"} 160 -# HELP ibc_acknowledgment_packets Number of acknowledgment packets relayed per channel -# TYPE ibc_acknowledgment_packets counter -ibc_acknowledgment_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 0 -ibc_acknowledgment_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 42 -ibc_acknowledgment_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 110 -ibc_acknowledgment_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 0 -# HELP ibc_receive_packets Number of receive packets relayed per channel -# TYPE ibc_receive_packets counter -ibc_receive_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 110 -ibc_receive_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 0 -ibc_receive_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 0 -ibc_receive_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 42 -# HELP ibc_timeout_packets Number of timeout packets relayed per channel -# TYPE ibc_timeout_packets counter -ibc_timeout_packets{src_chain="ibc-0",src_channel="channel-0",src_port="transfer"} 0 
-ibc_timeout_packets{src_chain="ibc-0",src_channel="channel-1",src_port="transfer"} 0 -ibc_timeout_packets{src_chain="ibc-1",src_channel="channel-0",src_port="transfer"} 0 -ibc_timeout_packets{src_chain="ibc-1",src_channel="channel-1",src_port="transfer"} 0 -# HELP msg_num How many messages Hermes submitted to the chain, per chain -# TYPE msg_num counter -msg_num{chain="ibc-0"} 168 -msg_num{chain="ibc-1"} 156 -# HELP queries Number of queries emitted by the relayer, per chain and query type -# TYPE queries counter -queries{chain="ibc-0",query_type="query_application_status"} 23 -queries{chain="ibc-0",query_type="query_channel"} 88 -queries{chain="ibc-0",query_type="query_client_connections"} 2 -queries{chain="ibc-0",query_type="query_client_state"} 383 -queries{chain="ibc-0",query_type="query_clients"} 1 -queries{chain="ibc-0",query_type="query_connection"} 2 -queries{chain="ibc-0",query_type="query_connection_channels"} 2 -queries{chain="ibc-0",query_type="query_consensus_state"} 392 -queries{chain="ibc-0",query_type="query_consensus_states"} 2 -queries{chain="ibc-0",query_type="query_latest_height"} 1 -queries{chain="ibc-0",query_type="query_packet_acknowledgements"} 5 -queries{chain="ibc-0",query_type="query_packet_commitments"} 10 -queries{chain="ibc-0",query_type="query_staking_params"} 2 -queries{chain="ibc-0",query_type="query_txs"} 76 -queries{chain="ibc-0",query_type="query_unreceived_acknowledgements"} 241 -queries{chain="ibc-0",query_type="query_unreceived_packets"} 127 -queries{chain="ibc-1",query_type="query_application_status"} 20 -queries{chain="ibc-1",query_type="query_channel"} 224 -queries{chain="ibc-1",query_type="query_client_connections"} 2 -queries{chain="ibc-1",query_type="query_client_state"} 387 -queries{chain="ibc-1",query_type="query_clients"} 1 -queries{chain="ibc-1",query_type="query_connection"} 2 -queries{chain="ibc-1",query_type="query_connection_channels"} 2 -queries{chain="ibc-1",query_type="query_consensus_state"} 394 
-queries{chain="ibc-1",query_type="query_consensus_states"} 3 -queries{chain="ibc-1",query_type="query_latest_height"} 1 -queries{chain="ibc-1",query_type="query_packet_acknowledgements"} 5 -queries{chain="ibc-1",query_type="query_packet_commitments"} 10 -queries{chain="ibc-1",query_type="query_staking_params"} 2 -queries{chain="ibc-1",query_type="query_txs"} 56 -queries{chain="ibc-1",query_type="query_unreceived_acknowledgements"} 127 -queries{chain="ibc-1",query_type="query_unreceived_packets"} 292 -# HELP tx_latency_confirmed The latency for all transactions submitted to a specific chain, i.e. the difference between the moment when Hermes received a batch of events until the corresponding transaction(s) were confirmed. Milliseconds. -# TYPE tx_latency_confirmed histogram -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="+Inf"} 4 -tx_latency_confirmed_sum{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 22466 -tx_latency_confirmed_count{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 4 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="+Inf"} 1 -tx_latency_confirmed_sum{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 4256 
-tx_latency_confirmed_count{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 1 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="+Inf"} 2 -tx_latency_confirmed_sum{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 9408 -tx_latency_confirmed_count{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 2 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_confirmed_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="+Inf"} 1 -tx_latency_confirmed_sum{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 3173 -tx_latency_confirmed_count{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 1 -# HELP tx_latency_submitted The latency for all transactions submitted to a specific chain, i.e. the difference between the moment when Hermes received a batch of events and when it submitted the corresponding transaction(s). Milliseconds. 
-# TYPE tx_latency_submitted histogram -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer",le="+Inf"} 5 -tx_latency_submitted_sum{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 14428 -tx_latency_submitted_count{chain="ibc-0",channel="channel-0",counterparty="ibc-1",port="transfer"} 5 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer",le="+Inf"} 1 -tx_latency_submitted_sum{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 729 -tx_latency_submitted_count{chain="ibc-0",channel="channel-1",counterparty="ibc-1",port="transfer"} 1 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer",le="+Inf"} 2 -tx_latency_submitted_sum{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 1706 
-tx_latency_submitted_count{chain="ibc-1",channel="channel-0",counterparty="ibc-0",port="transfer"} 2 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.5"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.9"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="0.99"} 0 -tx_latency_submitted_bucket{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer",le="+Inf"} 1 -tx_latency_submitted_sum{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 791 -tx_latency_submitted_count{chain="ibc-1",channel="channel-1",counterparty="ibc-0",port="transfer"} 1 -# HELP wallet_balance The balance in each wallet that Hermes is using, per wallet, denom and chain -# TYPE wallet_balance gauge -wallet_balance{account="cosmos1934akx97773lsjjs9x74dr03uuam29hcc9grp3",chain="ibc-0",denom="stake"} 99999970473 -wallet_balance{account="cosmos1hngzqscyg476nd68qggxps8r2aq56lne45ps8n",chain="ibc-1",denom="stake"} 99999978431 -# HELP workers Number of workers per object -# TYPE workers gauge -workers{type="client"} 4 -workers{type="packet"} 4 -workers{type="wallet"} 2 -# HELP ws_events How many IBC events did Hermes receive via the WebSocket subscription, per chain -# TYPE ws_events counter -ws_events{chain="ibc-0"} 443 -ws_events{chain="ibc-1"} 370 -``` - diff --git a/guide/src/tutorials/index.md b/guide/src/tutorials/index.md deleted file mode 100644 index 8eee017999..0000000000 --- a/guide/src/tutorials/index.md +++ /dev/null @@ -1,9 +0,0 @@ -# Tutorials - -This section includes tutorials for some common relayer uses cases and commands. You can also refer to the [Commands Reference](../commands/index.md) section to learn more about individual commands. 
- -## Basic tutorials - -**[Two Local Chains](./local-chains/index.md)** - -In this tutorial you will learn how to start two local [`Cosmos Gaia`](https://github.com/cosmos/gaia) chains that support the `IBC` protocol and start relaying packets between them. diff --git a/guide/src/tutorials/local-chains/gaia.md b/guide/src/tutorials/local-chains/gaia.md deleted file mode 100644 index 8736d48af2..0000000000 --- a/guide/src/tutorials/local-chains/gaia.md +++ /dev/null @@ -1,42 +0,0 @@ -# Install Gaia - -The script to start the chains requires gaia to be installed. - -> __NOTE__: This assumes you have `Golang` programming language installed on -> your machine. If not, please ensure you install before proceeding. See -> more details in the [Pre-requisites](../../pre_requisites.md#2-golang) section. - -#### Clone gaia - -Clone the repository from Github: - -```shell -git clone https://github.com/cosmos/gaia.git ~/go/src/github.com/cosmos/gaia -``` - -#### Build and Install - -Run the `make` command to build and install `gaiad` - -```shell -cd ~/go/src/github.com/cosmos/gaia -git checkout v4.2.1 -make install -``` - -If the command above is successful you can run the following command to ensure it was properly installed: - -```shell -gaiad version --log_level error --long | head -n4 -``` -Output: -```shell -name: gaia -server_name: gaiad -version: v4.2.1 -commit: dbd8a6fb522c571debf958837f9113c56d418f6b -``` - -## Next Steps - -In the next section you will learn how to [start two local chains](./start.md) diff --git a/guide/src/tutorials/local-chains/identifiers.md b/guide/src/tutorials/local-chains/identifiers.md deleted file mode 100644 index b72682d7b1..0000000000 --- a/guide/src/tutorials/local-chains/identifiers.md +++ /dev/null @@ -1,130 +0,0 @@ -# Identifiers - - -A chain allocates identifiers when it creates clients, connections and channels. These identifiers can subsequently be used to refer to existing clients, connections and channels. 
- -> NOTE: If you want to ensure you get the same identifiers while following the tutorials, run the each of the three commands below __once__ on `ibc-1`. This will ensure that when going through the tutorial, a second channel on `ibc-1` with identifier `channel-1` will created. - -Chains allocate identifiers using a chain specific allocation scheme. Currently, *cosmos-sdk* implementation uses the follow identifiers: - -### 1. Client Identifiers - -__`07-tendermint-`__ for tendermint clients - -For example `07-tendermint-0` is assigned to the first client created on `ibc-1`: - - ```shell -hermes tx raw create-client ibc-1 ibc-0 - ``` - - ```json -Success: CreateClient( - CreateClient( - Attributes { - height: Height { - revision: 1, - height: 103, - }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 112, - }, - }, - ), -) - ``` - -We will create a second client on `ibc-1` with identifier `07-tendermint-1` in the client tutorial. - -### 2. Connection Identifiers - -__`connection-`__ for connections - -For example `connection-0` is assigned to the first connection created on `ibc-1`: - -```shell -hermes tx raw conn-init ibc-1 ibc-0 07-tendermint-0 07-tendermint-0 -``` - -```json -Success: OpenInitConnection( - OpenInit( - Attributes { - height: Height { - revision: 1, - height: 119, - }, - connection_id: Some( - ConnectionId( - "connection-0", - ), - ), - client_id: ClientId( - "07-tendermint-0", - ), - counterparty_connection_id: None, - counterparty_client_id: ClientId( - "07-tendermint-0", - ), - }, - ), -) -``` -We will create a second connection on `ibc-1` with identifier `connection-1` in the connection tutorial. - -### 3. 
Channel Identifiers - -`channel-` for channels - -For example `channel-0` is assigned to the first channel created on `ibc-1`: - -```shell -hermes tx raw chan-open-init ibc-1 ibc-0 connection-0 transfer transfer -``` - -```json -Success: OpenInitChannel( - OpenInit( - Attributes { - height: Height { - revision: 1, - height: 225, - }, - port_id: PortId( - "transfer", - ), - channel_id: Some( - ChannelId( - "channel-0", - ), - ), - connection_id: ConnectionId( - "connection-0", - ), - counterparty_port_id: PortId( - "transfer", - ), - counterparty_channel_id: None, - }, - ), -) -``` - -In the following tutorials the __`ibc-0`__ and __`ibc-1`__ chains are setup and configured. - -For clarity, the tutorials run on a setup where the identifiers allocated to the client, connection and channel on __`ibc-0`__ are __`07-tendermint-0`__, __`connection-0`__ and __`channel-0`__ respectively. Identifiers allocated to the client, connection and channel on __`ibc-1`__ are __`07-tendermint-1`__, __`connection-1`__ and __`channel-1`__ respectively. - -Before going over the next sections, please ensure the commands above are executed. - -### Next Steps - -The following sections describe the commands to connect and relay packets between two chains. You can: - -1. use a [simplified approach](./relay-paths/index.md) for managing relaying paths, or -2. use [individual (raw) transactions](./raw/index.md) to create - all the necessary chain objects (clients, connections, channels) and relay packets. - diff --git a/guide/src/tutorials/local-chains/index.md b/guide/src/tutorials/local-chains/index.md deleted file mode 100644 index 4ffdfa025f..0000000000 --- a/guide/src/tutorials/local-chains/index.md +++ /dev/null @@ -1,7 +0,0 @@ -# Tutorial: Relayer with two local chains - -In this tutorial we will show how you can test the relayer against two chains, we provide a script that can start two separate chains and configure them automatically. This is the easiest way to get started. 
- -The script starts two [`gaia`](https://github.com/cosmos/gaia) chains that support the `IBC` protocol. - -Follow the steps in this tutorial section starting with the [Install Gaia](./gaia.md) section.# Local chains diff --git a/guide/src/tutorials/local-chains/raw/channel.md b/guide/src/tutorials/local-chains/raw/channel.md deleted file mode 100644 index 6902d98ae4..0000000000 --- a/guide/src/tutorials/local-chains/raw/channel.md +++ /dev/null @@ -1,46 +0,0 @@ -# 3. Channel Handshake - -## 3.1 `chan-open-init` - -Initialize a new unordered channel on `ibc-0`: -```shell -hermes tx raw chan-open-init ibc-0 ibc-1 connection-0 transfer transfer -o UNORDERED -``` - -## 3.2 `chan-open-try` - -Send a channel open try to `ibc-1`: -```shell -hermes tx raw chan-open-try ibc-1 ibc-0 connection-1 transfer transfer -s channel-0 -``` - -Take note of the ID allocated by the chain, e.g. `channel-1` on `ibc-1`. Use in the `chan-open-ack` CLI - -## 3.3 `chan-open-ack` - -Send a channel open acknowledgment to `ibc-0`: -```shell -hermes tx raw chan-open-ack ibc-0 ibc-1 connection-0 transfer transfer -d channel-0 -s channel-1 -``` - -## 3.4 `chan-open-confirm` - -Send the open confirmation to `ibc-1`: -```shell -hermes tx raw chan-open-confirm ibc-1 ibc-0 connection-1 transfer transfer -d channel-1 -s channel-0 -``` - -## 3.5 `query channel` -To verify that the two ends are in `Open` state: - -```shell -hermes query channel end ibc-0 transfer channel-0 -``` - -```shell -hermes query channel end ibc-1 transfer channel-1 -``` - -## Next Steps - -In the next section, we'll start to [relay packets](./packet.md) diff --git a/guide/src/tutorials/local-chains/raw/client.md b/guide/src/tutorials/local-chains/raw/client.md deleted file mode 100644 index b29ebe1e08..0000000000 --- a/guide/src/tutorials/local-chains/raw/client.md +++ /dev/null @@ -1,117 +0,0 @@ -# 1. Configuring clients - -### 1.1. 
`create client` - -First you will need to create a client for each chain: - -This command submits a transaction to a destination chain (`ibc-0`) with a request to create a client for a source chain (`ibc-1`): - -```shell -hermes tx raw create-client ibc-0 ibc-1 -``` - -if the command is successful a message similar to the one below will be displayed `status:success`: - -```json -{ - Success: CreateClient( - CreateClient( - Attributes { - height: Height { revision: 0, height: 43 }, - client_id: ClientId( - "07-tendermint-0", - ), - client_type: Tendermint, - consensus_height: Height { revision: 1, height: 32 }, - }, - ), - ) -} -``` - -> Please note the `client_id` value returned. You will need that for other commands. - -You can also execute a __query__ to view the client state on destination chain `ibc-0` by specifying the `client_id` value `07-tendermint-0`: - -```shell -hermes query client state ibc-0 07-tendermint-0 -``` - -which show a message similar to the one below: - -```json -Success: ClientState { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - trust_level: TrustThresholdFraction { - numerator: 1, - denominator: 3, - }, - trusting_period: 1209600s, - unbonding_period: 1814400s, - max_clock_drift: 3s, - frozen_height: Height { - revision: 0, - height: 0, - }, - latest_height: Height { - revision: 1, - height: 38, - }, - upgrade_path: [ - "upgrade", - "upgradedIBCState", - ], - allow_update_after_expiry: true, - allow_update_after_misbehaviour: true, -} -``` - -Now let's do the same for `ibc-1` as the destination chain: - -```shell -hermes tx raw create-client ibc-1 ibc-0 -``` - -Take note of the `client_id` allocated for this client. In the examples we assume is `07-tendermint-1` (this client identity is obtained by creating two clients on ibc-1 for ibc-0). 
- -As before, if the command is successful a message with `status:success` is displayed: - -```json -Success: CreateClient( - CreateClient( - Attributes { - height: Height { - revision: 1, - height: 135, - }, - client_id: ClientId( - "07-tendermint-1", - ), - client_type: Tendermint, - consensus_height: Height { - revision: 0, - height: 145, - }, - }, - ), -) -``` - -### 1.2 `update-client` - -Client states can be updated by sending an `update-client` transaction: - -```shell -hermes tx raw update-client ibc-0 07-tendermint-0 -``` - -```shell -hermes tx raw update-client ibc-1 07-tendermint-1 -``` - -## Next Steps - -In the next section, we'll establish the [Connection Handshake](./connection.md) diff --git a/guide/src/tutorials/local-chains/raw/connection.md b/guide/src/tutorials/local-chains/raw/connection.md deleted file mode 100644 index f93c35e074..0000000000 --- a/guide/src/tutorials/local-chains/raw/connection.md +++ /dev/null @@ -1,50 +0,0 @@ -# 2. Connection Handshake - -## 2.1 `conn-init` - -Initialize a new connection on `ibc-0`: -```shell -hermes tx raw conn-init ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -``` - -Take note of the ID allocated by the chain, e.g. `connection-0` on `ibc-0` in order to use it in the `conn-try` command below. - -## 2.2 `conn-try` - -Send a connection try to `ibc-1`: -```shell -hermes tx raw conn-try ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -s connection-0 -``` - -Take note of the ID allocated by the chain, e.g. `connection-1` on `ibc-1`. 
Use in the `conn-ack` CLI - -## 2.3 `conn-ack` - -Send a connection open acknowledgment to `ibc-0`: -```shell -hermes tx raw conn-ack ibc-0 ibc-1 07-tendermint-0 07-tendermint-1 -d connection-0 -s connection-1 -``` - -## 2.4 `conn-confirm` - -Send the open confirmation to `ibc-1`: -```shell -hermes tx raw conn-confirm ibc-1 ibc-0 07-tendermint-1 07-tendermint-0 -d connection-1 -s connection-0 -``` - -## 2.5 `query connection` - -To verify that the two ends are in `Open` state: - -```shell -hermes query connection end ibc-0 connection-0 -``` - -```shell -hermes query connection end ibc-1 connection-1 -``` - - -## Next Steps - -In the next section, we'll [establish a new channel](./channel.md) diff --git a/guide/src/tutorials/local-chains/raw/index.md b/guide/src/tutorials/local-chains/raw/index.md deleted file mode 100644 index bc0fbeebaa..0000000000 --- a/guide/src/tutorials/local-chains/raw/index.md +++ /dev/null @@ -1,12 +0,0 @@ -# Connecting the chains - -In the rest of this section we will show how to create the clients, establish a connection and a channel between the two chains, and relay packets over the channel. But first, make sure you followed the steps in the [start the local chains](../start.md) and [Identifiers section](../identifiers.md) - -## Steps to start relaying packets between the two local chains - -In order to start relaying packets please follow the steps below: - -* [Configure Clients](./client.md) -* [Connection Handshake](./connection.md) -* [Open the Channel](./channel.md) -* [Relay Packets](./packet.md) diff --git a/guide/src/tutorials/local-chains/raw/packet.md b/guide/src/tutorials/local-chains/raw/packet.md deleted file mode 100644 index 426b374d50..0000000000 --- a/guide/src/tutorials/local-chains/raw/packet.md +++ /dev/null @@ -1,92 +0,0 @@ -# 4. 
Relay Packets - -### 4.1 Query balances - -- balance at ibc-0 - - ```shell - gaiad --node tcp://localhost:26657 query bank balances $(gaiad --home data/ibc-0 keys --keyring-backend="test" show user -a) - ``` - -- balance at ibc-1 - - ```shell - gaiad --node tcp://localhost:26557 query bank balances $(gaiad --home data/ibc-1 keys --keyring-backend="test" show user -a) - ``` - -> Note that the addresses used in the two commands above are configured in `dev-env`. - -### 4.2 Packet relaying - -First, we'll send `9999` `samoleans` from `ibc-0` to `ibc-1`. - -- start the transfer of 9999 samoleans from `ibc-0` to `ibc-1`. This sends a `MsgTransfer` in a transaction to `ibc-0` - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 1 -d samoleans - ``` - -- query packet commitments on `ibc-0` - - ```shell - hermes query packet commitments ibc-0 transfer channel-0 - ``` - -- query unreceived packets on `ibc-1` - - ```shell - hermes query packet unreceived-packets ibc-1 transfer channel-1 - ``` - -- send `recv_packet` to `ibc-1` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` - -- query unreceived acks on `ibc-0` - - ```shell - hermes query packet unreceived-acks ibc-0 transfer channel-0 - ``` - -- send acknowledgement to `ibc-0` - - ```shell - hermes tx raw packet-ack ibc-0 ibc-1 transfer channel-1 - ``` - -Send those samoleans back, from `ibc-1` to `ibc-0`. - -```shell -hermes tx raw ft-transfer ibc-0 ibc-1 transfer channel-1 9999 -o 1000 -n 1 -d ibc/49D321B40FCF56B0370E5673CF090389C8E9CD185209FBE1BEE5D94E58E69BDC -hermes tx raw packet-recv ibc-0 ibc-1 transfer channel-1 -hermes tx raw packet-ack ibc-1 ibc-0 transfer channel-0 -``` - -The `ibc/49D321B40FCF56B0370E5673CF090389C8E9CD185209FBE1BEE5D94E58E69BDC` denominator above can be obtained by querying the balance at `ibc-1` after the transfer from `ibc-0` to `ibc-1` is concluded. - -Next we will test the packet timeouts. 
-- send 1 packet with low timeout height offset to ibc-0 - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 2 -n 1 - ``` - -- send timeout to `ibc-0` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` - -- send 1 packet with 2 second timeout to ibc-0 - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -t 2 -n 1 - ``` - -- send timeout to `ibc-0` - - ```shell - hermes tx raw packet-recv ibc-1 ibc-0 transfer channel-0 - ``` \ No newline at end of file diff --git a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md b/guide/src/tutorials/local-chains/relay-paths/create-new-path.md deleted file mode 100644 index 1ddc70699e..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/create-new-path.md +++ /dev/null @@ -1,65 +0,0 @@ -# Create a new path - -Perform client creation, connection and channel handshake to establish a new path between the `transfer` ports on `ibc-0` and `ibc-1` chains. - -```shell -hermes create channel ibc-0 -c ibc-1 --port-a transfer --port-b transfer --new-client-connection -``` - -If all the handshakes are performed successfully you should see a message similar to the one below: - -```json -Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-1", - ), - connection_id: ConnectionId( - "connection-1", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), -} - -``` - -Note that for each side, *a_side* (__ibc-0__) and *b_side* (__ibc-1__) there are a __client_id__, __connection_id__, __channel_id__ and __port_id__. -With all these established, you have [a path that you can relay packets over](./multiple-paths.md). diff --git a/guide/src/tutorials/local-chains/relay-paths/index.md b/guide/src/tutorials/local-chains/relay-paths/index.md deleted file mode 100644 index bcd7dfa3aa..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/index.md +++ /dev/null @@ -1,12 +0,0 @@ - -# Connect the chains using relay paths - -A relay path refers to a specific channel used to interconnect two chains and over which packets are being sent. - -Hermes can be started to listen for packet events on the two ends of multiple paths and relay packets over these paths. -This can be done over a new path or over existing paths. - -- [Create a new path](./create-new-path.md) -- [Packet relaying on multiple paths](./multiple-paths.md) - -Before proceeding to the sections above, please first, make sure you followed the steps in the [Identifiers section](../identifiers.md) diff --git a/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md b/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md deleted file mode 100644 index e314197603..0000000000 --- a/guide/src/tutorials/local-chains/relay-paths/multiple-paths.md +++ /dev/null @@ -1,328 +0,0 @@ -# Relay packets on multiple paths - -Hermes can relay packets over all current or future paths between the configured set of chains. - -Follow the steps below to connect three chains together and relay packets between them: - -1. 
Paste the following configuration in the standard Hermes configuration file at `~/.hermes/config.toml`: - - ```toml - [global] - log_level = 'info' - - [mode] - - [mode.clients] - enabled = true - refresh = true - misbehaviour = true - - [mode.connections] - enabled = false - - [mode.channels] - enabled = false - - [mode.packets] - enabled = true - clear_interval = 100 - clear_on_start = true - tx_confirmation = true - - [[chains]] - id = 'ibc-0' - rpc_addr = 'http://127.0.0.1:26657' - grpc_addr = 'http://127.0.0.1:9090' - websocket_addr = 'ws://127.0.0.1:26657/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - - [[chains]] - id = 'ibc-1' - rpc_addr = 'http://127.0.0.1:26557' - grpc_addr = 'http://127.0.0.1:9091' - websocket_addr = 'ws://127.0.0.1:26557/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - - [[chains]] - id = 'ibc-2' - rpc_addr = 'http://127.0.0.1:26457' - grpc_addr = 'http://127.0.0.1:9092' - websocket_addr = 'ws://127.0.0.1:26457/websocket' - rpc_timeout = '10s' - account_prefix = 'cosmos' - key_name = 'testkey' - store_prefix = 'ibc' - max_gas = 2000000 - gas_price = { price = 0.001, denom = 'stake' } - gas_adjustment = 0.1 - clock_drift = '5s' - trusting_period = '14days' - trust_threshold = { numerator = '1', denominator = '3' } - ``` - - This configuration has three chains `ibc-0`, `ibc-1` and `ibc-2`. - -2. 
Run the `dev-env` script with the parameters below to start three chains: - - ```bash - ./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 ibc-2 - ``` - - > __NOTE__: The script will prompt you to delete the data folder, double check the path and - > if it points to the `data` directory in the current directory, answer __'yes'__. - - The script configures and starts three __`gaiad`__ instances, named __`ibc-0`__, and __`ibc-1`__, and __`ibc-2`__. - - -3. Create a channel between `ibc-0` and `ibc-1`. Since this is the first time - we're connecting these two chains, we'll need to spin up a client and a - connection between them as well. The `create channel` command gives us the - convenient option to create a client and a connection. Keep in mind that this - is not the default behavior of `create channel`, but in this case we're - making an exception. Execute the following command: - - ```shell - hermes create channel ibc-0 --chain-b ibc-1 --port-a transfer --port-b transfer --new-client-connection - ``` - - Then respond 'yes' to the prompt that pops up. Once the command has run to - completion, you should see the following among the output logs: - - ```json - (...) - - Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-0", - version: 0, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. 
}, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), - } - ``` - - Note that the channel identifier on both `ibc-0` and `ibc-1` is `channel-0`. - -4. Create a channel between `ibc-1` and `ibc-2` using the structure of the - previous invocation we used to create a channel between `ibc-0` and `ibc-1`: - - ```shell - hermes create channel ibc-1 --chain-b ibc-2 --port-a transfer --port-b transfer --new-client-connection - ``` - - ```json - (...) - - Success: Channel { - ordering: Unordered, - a_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-1", - version: 1, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-1", - ), - connection_id: ConnectionId( - "connection-1", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-1", - ), - }, - b_side: ChannelSide { - chain: ProdChainHandle { - chain_id: ChainId { - id: "ibc-2", - version: 2, - }, - runtime_sender: Sender { .. }, - }, - client_id: ClientId( - "07-tendermint-0", - ), - connection_id: ConnectionId( - "connection-0", - ), - port_id: PortId( - "transfer", - ), - channel_id: ChannelId( - "channel-0", - ), - }, - connection_delay: 0s, - version: Some( - "ics20-1", - ), - } - ``` - - Note that the channel identifier on `ibc-1` is `channel-1`, and on `ibc-2` it is `channel-0`. - -5. Start Hermes using the `start` command: - - ```shell - hermes start - ``` - - Hermes will first relay the pending packets that have not been relayed and then - start passive relaying by listening to and acting on packet events. - -6. 
In a separate terminal, use the `ft-transfer` command to send: - - - Two packets from `ibc-0` to `ibc-1` from source channel `channel-0` - - ```shell - hermes tx raw ft-transfer ibc-1 ibc-0 transfer channel-0 9999 -o 1000 -n 2 - ``` - - ```json - Success: [ - SendPacket( - SendPacket { - height: revision: 0, height: 3056, - packet: PortId("transfer") ChannelId("channel-0") Sequence(3), - }, - ), - SendPacket( - SendPacket { - height: revision: 0, height: 3056, - packet: PortId("transfer") ChannelId("channel-0") Sequence(4), - }, - ), - ] - ``` - - - Two packets from `ibc-1` to `ibc-2` from source channel `channel-1` - - ```shell - hermes tx raw ft-transfer ibc-2 ibc-1 transfer channel-1 9999 -o 1000 -n 2 - ``` - - ```json - Success: [ - SendPacket( - SendPacket { - height: revision: 1, height: 3076, - packet: PortId("transfer") ChannelId("channel-1") Sequence(3), - }, - ), - SendPacket( - SendPacket { - height: revision: 1, height: 3076, - packet: PortId("transfer") ChannelId("channel-1") Sequence(4), - }, - ), - ] - ``` - -7. Observe the output on the relayer terminal, verify that the send events are processed, and that the `recv_packets` are sent out. - - ```text - (...) - - INFO ibc_relayer::link: [ibc-0 -> ibc-1] result events: - UpdateClientEv(ev_h:1-3048, 07-tendermint-0(0-3057), ) - WriteAcknowledgementEv(h:1-3048, seq:3, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - WriteAcknowledgementEv(h:1-3048, seq:4, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - INFO ibc_relayer::link: [ibc-0 -> ibc-1] success - - (...) - - INFO ibc_relayer::link: [ibc-1 -> ibc-0] clearing old packets - INFO ibc_relayer::link: [ibc-1 -> ibc-0] received from query_txs [] - INFO ibc_relayer::link: [ibc-1 -> ibc-0] finished clearing pending packets - INFO ibc_relayer::link: [ibc-1 -> ibc-0] generate messages from batch with 2 events - INFO ibc_relayer::link: [ibc-1 -> ibc-0] scheduling op. 
data with 2 msg(s) for Destination chain (height 1-3049) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] relay op. data to Destination, proofs height 1-3048, (delayed by: 2.154603ms) [try 1/10] - INFO ibc_relayer::link: [ibc-1 -> ibc-0] prepending Destination client update @ height 1-3049 - INFO ibc_relayer::link: [ibc-1 -> ibc-0] assembled batch of 3 message(s) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] result events: - UpdateClientEv(ev_h:0-3059, 07-tendermint-0(1-3049), ) - AcknowledgePacketEv(h:0-3059, seq:3, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - AcknowledgePacketEv(h:0-3059, seq:4, path:channel-0/transfer->channel-0/transfer, toh:1-4045, tos:0)) - INFO ibc_relayer::link: [ibc-1 -> ibc-0] success - - (...) - ``` - -8. Query the unreceived packets and acknowledgments on `ibc-1` and `ibc-2` from a different terminal: - - ```shell - hermes query packet unreceived-packets ibc-1 transfer channel-0 - hermes query packet unreceived-acks ibc-0 transfer channel-0 - hermes query packet unreceived-packets ibc-2 transfer channel-0 - hermes query packet unreceived-acks ibc-1 transfer channel-1 - ``` - - If everything went well, each of these commands should result in: - - ``` - Success: [] - ``` diff --git a/guide/src/tutorials/local-chains/start.md b/guide/src/tutorials/local-chains/start.md deleted file mode 100644 index 791d977fb3..0000000000 --- a/guide/src/tutorials/local-chains/start.md +++ /dev/null @@ -1,222 +0,0 @@ -# Start the local chains - -In this chapter, you will learn how to spawn two Gaia chains, and use Hermes to relay packets between them. -To spawn the chains and configure Hermes accordingly, we will make use of script bundled in the `ibc-rs` repository. 
- -To this end, clone the `ibc-rs` repository and check out the current version: - -```bash -git clone git@github.com:informalsystems/ibc-rs.git -cd ibc-rs -git checkout v0.15.0 -``` - -### Stop existing `gaiad` processes - -If this is not the first time you are running the script, you can manually stop the two gaia instances by executing the following command to kill all `gaiad` processes: - -```shell -killall gaiad -``` - -> __NOTE__: If you have any `Docker` containers running that might be using the same ports as `gaiad` (e.g. port 26657 or port 9090), please ensure you stop them first before proceeding to the next step. - -### Configuration file - -In order to run the script, you will need a `TOML` configuration file to be passed as a parameter. Please check the [`Configuration`](../../config.md) section for more information about the relayer configuration file. - -The following configuration file in the `ibc-rs` repository folder can be used for running the local chains: - -__config.toml__ - -```toml -{{#include ../../../../config.toml}} -``` - -#### Saving the configuration file - -##### Create the config.toml file - -```shell -mkdir -p $HOME/.hermes && touch $HOME/.hermes/config.toml -``` - -##### Add content to the configuration file: - -You can use your preferred text editor. If using `vi` you can run: - -```shell -vi ~/.hermes/config.toml -``` - -Then just __`copy`__ the content for `config.toml` above and __`paste`__ it into this file. - -### Generating private keys -Next, we will need to associate a private key with chains `ibc-0` and `ibc-1` which `hermes` will use to sign transactions. There -are two steps involved. For each chain, -1. Generate a *Key Seed file* using `gaiad` -2. Use the *Key Seed file* to associate the corresponding private key with the chain - -In this tutorial, we will only generate a single *Key Seed file*, which we will use with both chains. 
- -#### Generate a Key Seed file -We will generate the Key Seed file for a key that we will call `testkey`. - -```shell -gaiad keys add testkey --output json -``` -This will generate an output similar to the one below (albeit all on the same line): -```json -{ - "name": "testkey", - "type": "local", - "address": "cosmos1tc3vcuxyyac0dmayf887t95tdg7qpyql48w7gj", - "pubkey": "cosmospub1addwnpepqgg7ng4ycm60pdxfzdfh4hjvkwcr3da59mr8k883vsstx60ruv7kur4525u", - "mnemonic": "[24 words mnemonic]" -} -``` - -Next, copy and paste the output to a new file called `key_seed.json`. This file contains all the information necessary for `hermes` to -derive a private key from. - -#### Associate a private key with each chain -Our config file specifies two chains: `ibc-0` and `ibc-1`. We will need to specify a private key that `hermes` will use for each chain. As -previously mentioned, in this tutorial we will use the same private key for both chains. - -```shell -hermes keys add ibc-0 -f key_seed.json -hermes keys add ibc-1 -f key_seed.json -``` -If successful, both commands should show an output similar to: - -``` -Success: Added key testkey ([ADDRESS]) on [CHAIN ID] chain -``` - -And that's it! `hermes` will now be able to sign transactions to be sent to both chains. `key_seed.json` can safely be disposed of. 
- -### Running the script to start the chains - -From the `ibc-rs` repository folder run the following script with the parameters below to start the chains (`ibc-0` and `ibc-1`) -and import the signing keys into the keyring: - -```bash -./scripts/dev-env ~/.hermes/config.toml ibc-0 ibc-1 -``` - -> __NOTE__: If the script above prompts you to delete the data folder just answer __'yes'__ - -The script configures and starts two __`gaiad`__ instances, one named __`ibc-0`__ and the other __`ibc-1`__ - -```mermaid -graph TD - A[dev-env] -->|run| C(start chains) - C -->|gaiad| D[ibc-0] - C -->|gaiad| F[ibc-1] -``` - -If the script runs successfully you should see a message similar to the one below in the terminal: - -```shell -GAIA VERSION INFO: v4.2.1 -Generating gaia configurations... -Creating gaiad instance: home=./data | chain-id=ibc-0 | p2p=:26656 | rpc=:26657 | profiling=:6060 | grpc=:9090 | samoleans=:100000000000 -Change settings in config.toml file... -Start gaia on grpc port: 9090... -Balances for validator 'cosmos15cugtww7rwmayvshfznuxam55jsv23xh3jdeqv' @ 'tcp://localhost:26657' -balances: -- amount: "0" - denom: stake -pagination: - next_key: null - total: "0" -Balances for user 'cosmos1usn8g2rj9q48y245pql9589zf9m8srcpxtzklg' @ 'tcp://localhost:26657' -balances: -- amount: "100000000000" - denom: samoleans -- amount: "100000000000" - denom: stake -pagination: - next_key: null - total: "0" -Creating gaiad instance: home=./data | chain-id=ibc-1 | p2p=:26556 | rpc=:26557 | profiling=:6061 | grpc=:9091 | samoleans=:100000000000 -Change settings in config.toml file... -Start gaia on grpc port: 9091... 
-Balances for validator 'cosmos1zdmr04w7c04ef4vkuur9c0vyvl78q45qjncmja' @ 'tcp://localhost:26557' -balances: -- amount: "0" - denom: stake -pagination: - next_key: null - total: "0" -Balances for user 'cosmos12p6k2dta0lsd6n80tpz34yepfpv7u7fvedm5mp' @ 'tcp://localhost:26557' -balances: -- amount: "100000000000" - denom: samoleans -- amount: "100000000000" - denom: stake -pagination: - next_key: null - total: "0" -ibc-0 initialized. Watch file /Users/ancaz/rust/ibc-rs/data/ibc-0.log to see its execution. -ibc-1 initialized. Watch file /Users/ancaz/rust/ibc-rs/data/ibc-1.log to see its execution. -Building the Rust relayer... -Importing keys... -Success: Added key 'testkey' (cosmos1usn8g2rj9q48y245pql9589zf9m8srcpxtzklg) on chain ibc-0 -Success: Added key 'testkey' (cosmos12p6k2dta0lsd6n80tpz34yepfpv7u7fvedm5mp) on chain ibc-1 -Done! -``` - -### Data directory -The script creates a __`data`__ directory in the current directory in order. The __`data`__ directory contains the chain stores and configuration files. - -The __`data`__ directory has a tree structure similar to the one below: - -```shell -data -├── ibc-0 -│   ├── config -│   ├── data -│   ├── keyring-test -│   ├── user_seed.json -│   ├── user2_seed.json -│   └── validator_seed.json -├── ibc-0.log -├── ibc-1 -│   ├── config -│   ├── data -│   ├── keyring-test -│   ├── user_seed.json -│   ├── user2_seed.json -│   └── validator_seed.json -└── ibc-1.log - -``` - -> __Tip__: You can use the command `tree ./data/ -L 2` to view the folder structure above: - -### $HOME/.hermes directory - -By the default `hermes` expects the configuration file to be in the __`$HOME/.hermes`__ folder. - -It also stores the private keys for each chain in this folder as outlined in the [Keys](../../commands/keys/index.md) section. 
- -After executing the __`dev-env`__ script, this is how the folder should look like: - -```shell -$HOME/.hermes/ -├── config.toml -└── keys - ├── ibc-0 - │   └── keyring-test - │   └── testkey.json - └── ibc-1 - └── keyring-test - └── testkey.json -``` - -#### Next Steps - -[The next section](./identifiers.md) describes how identifers for clients, connections and channels -are allocated, and will walk you through how to pre-allocate some identifers -to help matching them with their corresponding chains for the purpose of this tutorial. diff --git a/guide/theme/css/chrome.css b/guide/theme/css/chrome.css deleted file mode 100644 index 825b2bd029..0000000000 --- a/guide/theme/css/chrome.css +++ /dev/null @@ -1,495 +0,0 @@ -/* CSS for UI elements (a.k.a. chrome) */ - -@import 'variables.css'; - -::-webkit-scrollbar { - background: var(--bg); -} -::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} -html { - scrollbar-color: var(--scrollbar) var(--bg); -} -#searchresults a, -.content a:link, -a:visited, -a > .hljs { - color: var(--links); -} - -/* Menu Bar */ - -#menu-bar, -#menu-bar-hover-placeholder { - z-index: 101; - margin: auto calc(0px - var(--page-padding)); -} -#menu-bar { - position: relative; - display: flex; - flex-wrap: wrap; - background-color: var(--bg); - border-bottom-color: var(--bg); - border-bottom-width: 1px; - border-bottom-style: solid; -} -#menu-bar.sticky, -.js #menu-bar-hover-placeholder:hover + #menu-bar, -.js #menu-bar:hover, -.js.sidebar-visible #menu-bar { - position: -webkit-sticky; - position: sticky; - top: 0 !important; -} -#menu-bar-hover-placeholder { - position: sticky; - position: -webkit-sticky; - top: 0; - height: var(--menu-bar-height); -} -#menu-bar.bordered { - border-bottom-color: var(--table-border-color); -} -#menu-bar i, #menu-bar .icon-button { - position: relative; - padding: 0 8px; - z-index: 10; - line-height: var(--menu-bar-height); - cursor: pointer; - transition: color 0.5s; -} -@media only screen and 
(max-width: 420px) { - #menu-bar i, #menu-bar .icon-button { - padding: 0 5px; - } -} - -.icon-button { - border: none; - background: none; - padding: 0; - color: inherit; -} -.icon-button i { - margin: 0; -} - -.right-buttons { - margin: 0 15px; -} -.right-buttons a { - text-decoration: none; -} - -.left-buttons { - display: flex; - margin: 0 5px; -} -.no-js .left-buttons { - display: none; -} - -.menu-title { - display: inline-block; - font-weight: 200; - font-size: 2rem; - line-height: var(--menu-bar-height); - text-align: center; - margin: 0; - flex: 1; - white-space: nowrap; - overflow: hidden; - text-overflow: ellipsis; -} -.js .menu-title { - cursor: pointer; -} - -.menu-bar, -.menu-bar:visited, -.nav-chapters, -.nav-chapters:visited, -.mobile-nav-chapters, -.mobile-nav-chapters:visited, -.menu-bar .icon-button, -.menu-bar a i { - color: var(--icons); -} - -.menu-bar i:hover, -.menu-bar .icon-button:hover, -.nav-chapters:hover, -.mobile-nav-chapters i:hover { - color: var(--icons-hover); -} - -/* Nav Icons */ - -.nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - - position: fixed; - top: 0; - bottom: 0; - margin: 0; - max-width: 150px; - min-width: 90px; - - display: flex; - justify-content: center; - align-content: center; - flex-direction: column; - - transition: color 0.5s, background-color 0.5s; -} - -.nav-chapters:hover { - text-decoration: none; - background-color: var(--theme-hover); - transition: background-color 0.15s, color 0.15s; -} - -.nav-wrapper { - margin-top: 50px; - display: none; -} - -.mobile-nav-chapters { - font-size: 2.5em; - text-align: center; - text-decoration: none; - width: 90px; - border-radius: 5px; - background-color: var(--nav-chapter); -} - -.previous { - float: left; -} - -.next { - float: right; - right: var(--page-padding); -} - -@media only screen and (max-width: 1080px) { - .nav-wide-wrapper { display: none; } - .nav-wrapper { display: block; } -} - -@media only screen and (max-width: 
1380px) { - .sidebar-visible .nav-wide-wrapper { display: none; } - .sidebar-visible .nav-wrapper { display: block; } -} - -/* Inline code */ - -:not(pre) > .hljs { - display: inline; - padding: 0.1em 0.3em; - border-radius: 3px; -} - -:not(pre):not(a) > .hljs { - color: var(--inline-code-color); - overflow-x: initial; -} - -a:hover > .hljs { - text-decoration: underline; -} - -pre { - position: relative; -} -pre > .buttons { - position: absolute; - z-index: 100; - right: 5px; - top: 5px; - - color: var(--buttons); - cursor: pointer; -} -pre > .buttons :hover { - color: var(--buttons-active); -} -pre > .buttons i { - margin-left: 8px; -} -pre > .buttons button { - color: inherit; - background: transparent; - border: none; - cursor: inherit; -} -pre > .result { - margin-top: 10px; -} - -/* Search */ - -#searchresults a { - text-decoration: none; -} - -mark { - border-radius: 2px; - padding: 0 3px 1px 3px; - margin: 0 -3px -1px -3px; - background-color: var(--search-mark-bg); - transition: background-color 300ms linear; - cursor: pointer; -} - -mark.fade-out { - background-color: rgba(0,0,0,0) !important; - cursor: auto; -} - -.searchbar-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); -} - -#searchbar { - width: 100%; - margin: 5px auto 0px auto; - padding: 10px 16px; - transition: box-shadow 300ms ease-in-out; - border: 1px solid var(--searchbar-border-color); - border-radius: 3px; - background-color: var(--searchbar-bg); - color: var(--searchbar-fg); -} -#searchbar:focus, -#searchbar.active { - box-shadow: 0 0 3px var(--searchbar-shadow-color); -} - -.searchresults-header { - font-weight: bold; - font-size: 1em; - padding: 18px 0 0 5px; - color: var(--searchresults-header-fg); -} - -.searchresults-outer { - margin-left: auto; - margin-right: auto; - max-width: var(--content-max-width); - border-bottom: 1px dashed var(--searchresults-border-color); -} - -ul#searchresults { - list-style: none; - padding-left: 20px; -} 
-ul#searchresults li { - margin: 10px 0px; - padding: 2px; - border-radius: 2px; -} -ul#searchresults li.focus { - background-color: var(--searchresults-li-bg); -} -ul#searchresults span.teaser { - display: block; - clear: both; - margin: 5px 0 0 20px; - font-size: 0.8em; -} -ul#searchresults span.teaser em { - font-weight: bold; - font-style: normal; -} - -/* Sidebar */ - -.sidebar { - position: fixed; - left: 0; - top: 0; - bottom: 0; - width: var(--sidebar-width); - font-size: 0.875em; - box-sizing: border-box; - -webkit-overflow-scrolling: touch; - overscroll-behavior-y: contain; - background-color: var(--sidebar-bg); - color: var(--sidebar-fg); -} -.sidebar-resizing { - -moz-user-select: none; - -webkit-user-select: none; - -ms-user-select: none; - user-select: none; -} -.js:not(.sidebar-resizing) .sidebar { - transition: transform 0.3s; /* Animation: slide away */ -} -.sidebar code { - line-height: 2em; -} -.sidebar .sidebar-scrollbox { - overflow-y: auto; - position: absolute; - top: 0; - bottom: 0; - left: 0; - right: 0; - padding: 10px 10px; -} -.sidebar .sidebar-resize-handle { - position: absolute; - cursor: col-resize; - width: 0; - right: 0; - top: 0; - bottom: 0; -} -.js .sidebar .sidebar-resize-handle { - cursor: col-resize; - width: 5px; -} -.sidebar-hidden .sidebar { - transform: translateX(calc(0px - var(--sidebar-width))); -} -.sidebar::-webkit-scrollbar { - background: var(--sidebar-bg); -} -.sidebar::-webkit-scrollbar-thumb { - background: var(--scrollbar); -} - -.sidebar-visible .page-wrapper { - transform: translateX(var(--sidebar-width)); -} -@media only screen and (min-width: 620px) { - .sidebar-visible .page-wrapper { - transform: none; - margin-left: var(--sidebar-width); - } -} - -.chapter { - list-style: none outside none; - padding-left: 0; - line-height: 2.2em; -} - -.chapter ol { - width: 100%; -} - -.chapter li { - display: flex; - color: var(--sidebar-non-existant); -} -.chapter li a { - display: block; - padding: 0; - 
text-decoration: none; - color: var(--sidebar-fg); -} - -.chapter li a:hover { - color: var(--sidebar-active); -} - -.chapter li a.active { - color: var(--sidebar-active); -} - -.chapter li > a.toggle { - cursor: pointer; - display: block; - margin-left: auto; - padding: 0 10px; - user-select: none; - opacity: 0.68; -} - -.chapter li > a.toggle div { - transition: transform 0.5s; -} - -/* collapse the section */ -.chapter li:not(.expanded) + li > ol { - display: none; -} - -.chapter li.chapter-item { - line-height: 1.5em; - margin-top: 0.6em; -} - -.chapter li.expanded > a.toggle div { - transform: rotate(90deg); -} - -.spacer { - width: 100%; - height: 3px; - margin: 5px 0px; -} -.chapter .spacer { - background-color: var(--sidebar-spacer); -} - -@media (-moz-touch-enabled: 1), (pointer: coarse) { - .chapter li a { padding: 5px 0; } - .spacer { margin: 10px 0; } -} - -.section { - list-style: none outside none; - padding-left: 20px; - line-height: 1.9em; -} - -/* Theme Menu Popup */ - -.theme-popup { - position: absolute; - left: 10px; - top: var(--menu-bar-height); - z-index: 1000; - border-radius: 4px; - font-size: 0.7em; - color: var(--fg); - background: var(--theme-popup-bg); - border: 1px solid var(--theme-popup-border); - margin: 0; - padding: 0; - list-style: none; - display: none; -} -.theme-popup .default { - color: var(--icons); -} -.theme-popup .theme { - width: 100%; - border: 0; - margin: 0; - padding: 2px 10px; - line-height: 25px; - white-space: nowrap; - text-align: left; - cursor: pointer; - color: inherit; - background: inherit; - font-size: inherit; -} -.theme-popup .theme:hover { - background-color: var(--theme-hover); -} -.theme-popup .theme:hover:first-child, -.theme-popup .theme:hover:last-child { - border-top-left-radius: inherit; - border-top-right-radius: inherit; -} diff --git a/guide/theme/css/variables.css b/guide/theme/css/variables.css deleted file mode 100644 index 776cb8c9bf..0000000000 --- a/guide/theme/css/variables.css +++ 
/dev/null @@ -1,257 +0,0 @@ - -/* Globals */ - -:root { - --sidebar-width: 300px; - --page-padding: 15px; - --content-max-width: 750px; - --menu-bar-height: 50px; -} -/* Themes */ - -.ayu { - --bg: hsl(210, 25%, 8%); - --fg: #c5c5c5; - - --sidebar-bg: #14191f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #5c6773; - --sidebar-active: #ffb454; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #0096cf; - - --inline-code-color: #ffb454; - - --theme-popup-bg: #14191f; - --theme-popup-border: #5c6773; - --theme-hover: #191f26; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(210, 25%, 13%); - --table-header-bg: hsl(210, 25%, 28%); - --table-alternate-bg: hsl(210, 25%, 11%); - - --searchbar-border-color: #848484; - --searchbar-bg: #424242; - --searchbar-fg: #fff; - --searchbar-shadow-color: #d4c89f; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #252932; - --search-mark-bg: #e3b171; -} - -.coal { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; -} - -.light { - --bg: 
hsl(0, 0%, 97%); - --fg: #181818; - - --sidebar-bg: #335787; - --sidebar-fg: #ffffff; - --sidebar-non-existant: #aaaaaa; - --sidebar-active: #9cefff; - --sidebar-spacer: #e5e5e5; - - --buttons: #898989; - --buttons-active: #8faaef; - - --scrollbar: var(--sidebar-fg); - - --icons: #2f2f2f; - --icons-hover: #333333; - - --nav-chapter: #82aee8; - - --links: #267CB9; - - --inline-code-color: #2d2b26; - - --theme-popup-bg: #fafafa; - --theme-popup-border: #cccccc; - --theme-hover: #e6e6e6; - - --quote-bg: hsl(212, 52%, 86%); - --quote-border: hsl(220, 83%, 67%); - - --table-border-color: hsl(0, 0%, 95%); - --table-header-bg: hsl(0, 0%, 80%); - --table-alternate-bg: hsl(0, 0%, 97%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #e4f2fe; - --search-mark-bg: #a2cff5; -} - -.navy { - --bg: hsl(226, 23%, 11%); - --fg: #bcbdd0; - - --sidebar-bg: #282d3f; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505274; - --sidebar-active: #2b79a2; - --sidebar-spacer: #2d334f; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #b7b9cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #161923; - --theme-popup-border: #737480; - --theme-hover: #282e40; - - --quote-bg: hsl(226, 15%, 17%); - --quote-border: hsl(226, 15%, 22%); - - --table-border-color: hsl(226, 23%, 16%); - --table-header-bg: hsl(226, 23%, 31%); - --table-alternate-bg: hsl(226, 23%, 14%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #aeaec6; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #5f5f71; - --searchresults-border-color: #5c5c68; - --searchresults-li-bg: #242430; - --search-mark-bg: #a2cff5; -} - -.rust { - --bg: hsl(60, 9%, 87%); - --fg: #262625; - - --sidebar-bg: #3b2e2a; - --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505254; - 
--sidebar-active: #e69f67; - --sidebar-spacer: #45373a; - - --scrollbar: var(--sidebar-fg); - - --icons: #737480; - --icons-hover: #262625; - - --links: #2b79a2; - - --inline-code-color: #6e6b5e; - - --theme-popup-bg: #e1e1db; - --theme-popup-border: #b38f6b; - --theme-hover: #99908a; - - --quote-bg: hsl(60, 5%, 75%); - --quote-border: hsl(60, 5%, 70%); - - --table-border-color: hsl(60, 9%, 82%); - --table-header-bg: #b3a497; - --table-alternate-bg: hsl(60, 9%, 84%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #fafafa; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #888; - --searchresults-li-bg: #dec2a2; - --search-mark-bg: #e69f67; -} - -@media (prefers-color-scheme: dark) { - .light.no-js { - --bg: hsl(200, 7%, 8%); - --fg: #98a3ad; - - --sidebar-bg: #292c2f; - --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; - --sidebar-active: #3473ad; - --sidebar-spacer: #393939; - - --scrollbar: var(--sidebar-fg); - - --icons: #43484d; - --icons-hover: #b3c0cc; - - --links: #2b79a2; - - --inline-code-color: #c5c8c6;; - - --theme-popup-bg: #141617; - --theme-popup-border: #43484d; - --theme-hover: #1f2124; - - --quote-bg: hsl(234, 21%, 18%); - --quote-border: hsl(234, 21%, 23%); - - --table-border-color: hsl(200, 7%, 13%); - --table-header-bg: hsl(200, 7%, 28%); - --table-alternate-bg: hsl(200, 7%, 11%); - - --searchbar-border-color: #aaa; - --searchbar-bg: #b7b7b7; - --searchbar-fg: #000; - --searchbar-shadow-color: #aaa; - --searchresults-header-fg: #666; - --searchresults-border-color: #98a3ad; - --searchresults-li-bg: #2b2b2f; - --search-mark-bg: #355c7d; - } -} diff --git a/modules/Cargo.toml b/modules/Cargo.toml index 9a6b3374e1..2afe81cf70 100644 --- a/modules/Cargo.toml +++ b/modules/Cargo.toml @@ -7,7 +7,6 @@ readme = "README.md" keywords = ["blockchain", "consensus", "cosmos", "ibc", "tendermint"] repository = "https://github.com/informalsystems/ibc-rs" authors = 
["Informal Systems "] -rust-version = "1.60" description = """ Implementation of the Inter-Blockchain Communication Protocol (IBC). This crate comprises the main data structures and on-chain logic. @@ -18,17 +17,29 @@ all-features = true [features] default = ["std"] -std = ["flex-error/std", "flex-error/eyre_tracer", "ibc-proto/std", "clock"] +std = [ + "flex-error/std", + "flex-error/eyre_tracer", + "ibc-proto/std", + "ics23/std", + "clock", + "sp-core/std", + "sp-std/std", + "codec/std", + "tendermint-rpc" +] clock = ["tendermint/clock", "time/std"] # This feature grants access to development-time mocking libraries, such as `MockContext` or `MockHeader`. # Depends on the `testgen` suite for generating Tendermint light blocks. -mocks = ["tendermint-testgen", "clock", "std"] +mocks = ["clock", "std", "sha2"] [dependencies] # Proto definitions for all IBC-related interfaces, e.g., connections or channels. ibc-proto = { version = "0.18.0", path = "../proto", default-features = false } -ics23 = { version = "=0.8.0-alpha", default-features = false } +derive = { path = "../derive", package = "ibc-derive" } +borsh = { version = "0.9.3", default-features = false } +ics23 = { git = "https://github.com/composablefi/ics23", rev = "b500a5c6068eb53c83c4c6c13bd9d8c25e0bf927", default-features = false } time = { version = "0.3", default-features = false } serde_derive = { version = "1.0.104", default-features = false } serde = { version = "1.0", default-features = false } @@ -36,31 +47,30 @@ serde_json = { version = "1", default-features = false } tracing = { version = "0.1.34", default-features = false } prost = { version = "0.10", default-features = false } prost-types = { version = "0.10", default-features = false } -bytes = { version = "1.1.0", default-features = false } safe-regex = { version = "0.2.5", default-features = false } subtle-encoding = { version = "0.5", default-features = false } -sha2 = { version = "0.10.2", default-features = false } flex-error = { version = 
"0.4.4", default-features = false } num-traits = { version = "0.2.15", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = ["from", "into", "display"] } uint = { version = "0.9", default-features = false } +primitive-types = { version = "0.11.1", default-features = false, features = ["serde_no_std"] } +codec = { package = "parity-scale-codec", version = "3.0.0", default-features = false, features = ["derive"] } +sha2 = { version = "0.10.2", optional = true } [dependencies.tendermint] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false [dependencies.tendermint-proto] -version = "=0.23.7" +git = "https://github.com/composableFi/tendermint-rs" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" default-features = false -[dependencies.tendermint-light-client-verifier] -version = "=0.23.7" -default-features = false - -[dependencies.tendermint-testgen] -version = "=0.23.7" +[dependencies.tendermint-rpc] +git = "https://github.com/composableFi/tendermint-rs" +rev = "5a74e0f8da4d3dab83cc04b5f1363b018cf3d9e8" optional = true -default-features = false [dev-dependencies] env_logger = "0.9.0" @@ -68,10 +78,8 @@ tracing-subscriber = { version = "0.3.11", features = ["fmt", "env-filter", "jso test-log = { version = "0.2.10", features = ["trace"] } modelator = "0.4.2" sha2 = { version = "0.10.2" } -tendermint-rpc = { version = "=0.23.7", features = ["http-client", "websocket-client"] } -tendermint-testgen = { version = "=0.23.7" } # Needed for generating (synthetic) light blocks. 
- -[[test]] -name = "mbt" -path = "tests/mbt.rs" -required-features = ["mocks"] +tokio = { version = "1.17.0", features = ["full"] } +serde_json = "1.0.74" +sha3 = { version = "0.10.1" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } +sp-std = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.24" } diff --git a/modules/src/applications/transfer/acknowledgement.rs b/modules/src/applications/transfer/acknowledgement.rs index cd7a6272dd..39072dff59 100644 --- a/modules/src/applications/transfer/acknowledgement.rs +++ b/modules/src/applications/transfer/acknowledgement.rs @@ -1,6 +1,5 @@ use super::error::Error; -use crate::core::ics26_routing::context::Acknowledgement as AckTrait; -use crate::prelude::*; +use crate::{core::ics26_routing::context::Acknowledgement as AckTrait, prelude::*}; use core::fmt::{Display, Formatter}; use serde::{Deserialize, Deserializer}; @@ -12,50 +11,50 @@ pub const ACK_SUCCESS_B64: &[u8] = b"AQ=="; #[derive(Clone, Debug)] pub enum Acknowledgement { - /// Equivalent to b"AQ==" (i.e. `base64::encode(0x01)`) - Success(Vec), - /// Error Acknowledgement - Error(String), + /// Equivalent to b"AQ==" (i.e. 
`base64::encode(0x01)`) + Success(Vec), + /// Error Acknowledgement + Error(String), } impl Acknowledgement { - pub fn success() -> Self { - Self::Success(ACK_SUCCESS_B64.to_vec()) - } + pub fn success() -> Self { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } - pub fn from_error(err: Error) -> Self { - Self::Error(format!("{}: {}", ACK_ERR_STR, err)) - } + pub fn from_error(err: Error) -> Self { + Self::Error(format!("{}: {}", ACK_ERR_STR, err)) + } } impl AsRef<[u8]> for Acknowledgement { - fn as_ref(&self) -> &[u8] { - match self { - Acknowledgement::Success(b) => b.as_slice(), - Acknowledgement::Error(s) => s.as_bytes(), - } - } + fn as_ref(&self) -> &[u8] { + match self { + Acknowledgement::Success(b) => b.as_slice(), + Acknowledgement::Error(s) => s.as_bytes(), + } + } } impl<'de> Deserialize<'de> for Acknowledgement { - fn deserialize>(deserializer: D) -> Result { - let s = String::deserialize(deserializer)?; - let ack = if s.as_bytes() == ACK_SUCCESS_B64 { - Self::Success(ACK_SUCCESS_B64.to_vec()) - } else { - Self::Error(s) - }; - Ok(ack) - } + fn deserialize>(deserializer: D) -> Result { + let s = String::deserialize(deserializer)?; + let ack = if s.as_bytes() == ACK_SUCCESS_B64 { + Self::Success(ACK_SUCCESS_B64.to_vec()) + } else { + Self::Error(s) + }; + Ok(ack) + } } impl Display for Acknowledgement { - fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { - match self { - Acknowledgement::Success(_) => write!(f, "AQ=="), - Acknowledgement::Error(err_str) => write!(f, "{}", err_str), - } - } + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + match self { + Acknowledgement::Success(_) => write!(f, "AQ=="), + Acknowledgement::Error(err_str) => write!(f, "{}", err_str), + } + } } impl AckTrait for Acknowledgement {} diff --git a/modules/src/applications/transfer/context.rs b/modules/src/applications/transfer/context.rs index 13220ca87b..7153bac7be 100644 --- a/modules/src/applications/transfer/context.rs +++ 
b/modules/src/applications/transfer/context.rs @@ -1,348 +1,367 @@ -use sha2::{Digest, Sha256}; use subtle_encoding::hex; use super::error::Error as Ics20Error; -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::events::{AckEvent, AckStatusEvent, RecvEvent, TimeoutEvent}; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::on_ack_packet::process_ack_packet; -use crate::applications::transfer::relay::on_recv_packet::process_recv_packet; -use crate::applications::transfer::relay::on_timeout_packet::process_timeout_packet; -use crate::applications::transfer::{PrefixedCoin, PrefixedDenom, VERSION}; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck}; -use crate::prelude::*; -use crate::signer::Signer; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, + events::{AckEvent, AckStatusEvent, RecvEvent, TimeoutEvent}, + packet::PacketData, + relay::{ + on_ack_packet::process_ack_packet, on_recv_packet::process_recv_packet, + on_timeout_packet::process_timeout_packet, + }, + PrefixedCoin, PrefixedDenom, VERSION, + }, + core::{ + ics04_channel::{ + channel::{Counterparty, Order}, + context::{ChannelKeeper, ChannelReader}, + msgs::acknowledgement::Acknowledgement as GenericAcknowledgement, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + ics26_routing::context::{ModuleOutputBuilder, OnRecvPacketAck, ReaderContext}, + }, + 
prelude::*, + signer::Signer, +}; pub trait Ics20Keeper: - ChannelKeeper + BankKeeper::AccountId> + ChannelKeeper + BankKeeper::AccountId> { - type AccountId; + type AccountId; } -pub trait Ics20Reader: ChannelReader + PortReader { - type AccountId: TryFrom; - - /// get_port returns the portID for the transfer module. - fn get_port(&self) -> Result; - - /// Returns the escrow account id for a port and channel combination - fn get_channel_escrow_address( - &self, - port_id: &PortId, - channel_id: ChannelId, - ) -> Result<::AccountId, Ics20Error> { - let hash = cosmos_adr028_escrow_address(port_id, channel_id); - String::from_utf8(hex::encode_upper(hash)) - .expect("hex encoded bytes are not valid UTF8") - .parse::() - .map_err(Ics20Error::signer)? - .try_into() - .map_err(|_| Ics20Error::parse_account_failure()) - } - - /// Returns true iff send is enabled. - fn is_send_enabled(&self) -> bool; - - /// Returns true iff receive is enabled. - fn is_receive_enabled(&self) -> bool; - - /// Returns a hash of the prefixed denom. - /// Implement only if the host chain supports hashed denominations. - fn denom_hash_string(&self, _denom: &PrefixedDenom) -> Option { - None - } +pub trait Ics20Reader: ChannelReader +where + Self: Sized, +{ + type AccountId: TryFrom; + + /// get_port returns the portID for the transfer module. + fn get_port(&self) -> Result; + + /// Returns the escrow account id for a port and channel combination + fn get_channel_escrow_address( + &self, + port_id: &PortId, + channel_id: ChannelId, + ) -> Result<::AccountId, Ics20Error> { + let hash = cosmos_adr028_escrow_address(self, port_id, channel_id); + + String::from_utf8(hex::encode_upper(hash)) + .expect("hex encoded bytes are not valid UTF8") + .parse::() + .map_err(Ics20Error::signer)? + .try_into() + .map_err(|_| Ics20Error::parse_account_failure()) + } + + /// Returns true iff send is enabled. + fn is_send_enabled(&self) -> bool; + + /// Returns true iff receive is enabled. 
+ fn is_receive_enabled(&self) -> bool; + + /// Returns a hash of the prefixed denom. + /// Implement only if the host chain supports hashed denominations. + fn denom_hash_string(&self, _denom: &PrefixedDenom) -> Option { + None + } } // https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-028-public-key-addresses.md -fn cosmos_adr028_escrow_address(port_id: &PortId, channel_id: ChannelId) -> Vec { - let contents = format!("{}/{}", port_id, channel_id); - - let mut hasher = Sha256::new(); - hasher.update(VERSION.as_bytes()); - hasher.update([0]); - hasher.update(contents.as_bytes()); - - let mut hash = hasher.finalize().to_vec(); - hash.truncate(20); - hash +fn cosmos_adr028_escrow_address( + ctx: &dyn ChannelReader, + port_id: &PortId, + channel_id: ChannelId, +) -> Vec { + let contents = format!("{}/{}", port_id, channel_id); + let mut data = VERSION.as_bytes().to_vec(); + data.extend_from_slice(&[0]); + data.extend_from_slice(contents.as_bytes()); + + let mut hash = ctx.hash(data); + hash.truncate(20); + hash } pub trait BankKeeper { - type AccountId; - - /// This function should enable sending ibc fungible tokens from one account to another - fn send_coins( - &mut self, - from: &Self::AccountId, - to: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; - - /// This function to enable minting ibc tokens to a user account - fn mint_coins( - &mut self, - account: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; - - /// This function should enable burning of minted tokens in a user account - fn burn_coins( - &mut self, - account: &Self::AccountId, - amt: &PrefixedCoin, - ) -> Result<(), Ics20Error>; + type AccountId; + + /// This function should enable sending ibc fungible tokens from one account to another + fn send_coins( + &mut self, + from: &Self::AccountId, + to: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function to enable minting ibc tokens to a user account + 
fn mint_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; + + /// This function should enable burning of minted tokens in a user account + fn burn_coins( + &mut self, + account: &Self::AccountId, + amt: &PrefixedCoin, + ) -> Result<(), Ics20Error>; } /// Captures all the dependencies which the ICS20 module requires to be able to dispatch and /// process IBC messages. pub trait Ics20Context: - Ics20Keeper::AccountId> - + Ics20Reader::AccountId> + Ics20Keeper::AccountId> + + Ics20Reader::AccountId> + + ReaderContext { - type AccountId: TryFrom; + type AccountId: TryFrom; } fn validate_transfer_channel_params( - ctx: &mut impl Ics20Context, - order: Order, - port_id: &PortId, - channel_id: &ChannelId, - version: &Version, + ctx: &mut impl Ics20Context, + order: Order, + port_id: &PortId, + channel_id: &ChannelId, + version: &Version, ) -> Result<(), Ics20Error> { - if channel_id.sequence() > (u32::MAX as u64) { - return Err(Ics20Error::chan_seq_exceeds_limit(channel_id.sequence())); - } + if channel_id.sequence() > (u32::MAX as u64) { + return Err(Ics20Error::chan_seq_exceeds_limit(channel_id.sequence())); + } - if order != Order::Unordered { - return Err(Ics20Error::channel_not_unordered(order)); - } + if order != Order::Unordered { + return Err(Ics20Error::channel_not_unordered(order)); + } - let bound_port = ctx.get_port()?; - if port_id != &bound_port { - return Err(Ics20Error::invalid_port(port_id.clone(), bound_port)); - } + let bound_port = ctx.get_port()?; + if port_id != &bound_port { + return Err(Ics20Error::invalid_port(port_id.clone(), bound_port)); + } - if version != &Version::ics20() { - return Err(Ics20Error::invalid_version(version.clone())); - } + if version != &Version::ics20() { + return Err(Ics20Error::invalid_version(version.clone())); + } - Ok(()) + Ok(()) } fn validate_counterparty_version(counterparty_version: &Version) -> Result<(), Ics20Error> { - if counterparty_version == 
&Version::ics20() { - Ok(()) - } else { - Err(Ics20Error::invalid_counterparty_version( - counterparty_version.clone(), - )) - } + if counterparty_version == &Version::ics20() { + Ok(()) + } else { + Err(Ics20Error::invalid_counterparty_version(counterparty_version.clone())) + } } #[allow(clippy::too_many_arguments)] pub fn on_chan_open_init( - ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - order: Order, - _connection_hops: &[ConnectionId], - port_id: &PortId, - channel_id: &ChannelId, - _counterparty: &Counterparty, - version: &Version, + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, ) -> Result<(), Ics20Error> { - validate_transfer_channel_params(ctx, order, port_id, channel_id, version) + validate_transfer_channel_params(ctx, order, port_id, channel_id, version) } #[allow(clippy::too_many_arguments)] pub fn on_chan_open_try( - ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - order: Order, - _connection_hops: &[ConnectionId], - port_id: &PortId, - channel_id: &ChannelId, - _counterparty: &Counterparty, - version: &Version, - counterparty_version: &Version, + ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + order: Order, + _connection_hops: &[ConnectionId], + port_id: &PortId, + channel_id: &ChannelId, + _counterparty: &Counterparty, + version: &Version, + counterparty_version: &Version, ) -> Result { - validate_transfer_channel_params(ctx, order, port_id, channel_id, version)?; - validate_counterparty_version(counterparty_version)?; - Ok(Version::ics20()) + validate_transfer_channel_params(ctx, order, port_id, channel_id, version)?; + validate_counterparty_version(counterparty_version)?; + Ok(Version::ics20()) } pub fn on_chan_open_ack( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - 
_channel_id: &ChannelId, - counterparty_version: &Version, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + counterparty_version: &Version, ) -> Result<(), Ics20Error> { - validate_counterparty_version(counterparty_version)?; - Ok(()) + validate_counterparty_version(counterparty_version)?; + Ok(()) } pub fn on_chan_open_confirm( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Ok(()) + Ok(()) } pub fn on_chan_close_init( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Err(Ics20Error::cant_close_channel()) + Err(Ics20Error::cant_close_channel()) } pub fn on_chan_close_confirm( - _ctx: &mut impl Ics20Context, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, + _ctx: &mut impl Ics20Context, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, ) -> Result<(), Ics20Error> { - Ok(()) + Ok(()) } pub fn on_recv_packet( - ctx: &Ctx, - output: &mut ModuleOutputBuilder, - packet: &Packet, - _relayer: &Signer, + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, ) -> OnRecvPacketAck { - let data = match serde_json::from_slice::(&packet.data) { - Ok(data) => data, - Err(_) => { - return OnRecvPacketAck::Failed(Box::new(Acknowledgement::Error( - Ics20Error::packet_data_deserialization().to_string(), - ))) - } - }; - - let ack = match process_recv_packet(ctx, output, packet, data.clone()) { - Ok(write_fn) => OnRecvPacketAck::Successful(Box::new(Acknowledgement::success()), write_fn), 
- Err(e) => OnRecvPacketAck::Failed(Box::new(Acknowledgement::from_error(e))), - }; - - let recv_event = RecvEvent { - receiver: data.receiver, - denom: data.token.denom, - amount: data.token.amount, - success: ack.is_successful(), - }; - output.emit(recv_event.into()); - - ack + let data = match serde_json::from_slice::(&packet.data) { + Ok(data) => data, + Err(_) => { + return OnRecvPacketAck::Failed(Box::new(Acknowledgement::Error( + Ics20Error::packet_data_deserialization().to_string(), + ))) + }, + }; + + let ack = match process_recv_packet(ctx, output, packet, data.clone()) { + Ok(write_fn) => OnRecvPacketAck::Successful(Box::new(Acknowledgement::success()), write_fn), + Err(e) => OnRecvPacketAck::Failed(Box::new(Acknowledgement::from_error(e))), + }; + + let recv_event = RecvEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + success: ack.is_successful(), + }; + output.emit(recv_event.into()); + + ack } pub fn on_acknowledgement_packet( - ctx: &mut impl Ics20Context, - output: &mut ModuleOutputBuilder, - packet: &Packet, - acknowledgement: &GenericAcknowledgement, - _relayer: &Signer, + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + acknowledgement: &GenericAcknowledgement, + _relayer: &Signer, ) -> Result<(), Ics20Error> { - let data = serde_json::from_slice::(&packet.data) - .map_err(|_| Ics20Error::packet_data_deserialization())?; + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| Ics20Error::packet_data_deserialization())?; - let acknowledgement = serde_json::from_slice::(acknowledgement.as_ref()) - .map_err(|_| Ics20Error::ack_deserialization())?; + let acknowledgement = serde_json::from_slice::(acknowledgement.as_ref()) + .map_err(|_| Ics20Error::ack_deserialization())?; - process_ack_packet(ctx, packet, &data, &acknowledgement)?; + process_ack_packet(ctx, packet, &data, &acknowledgement)?; - let ack_event = AckEvent { - receiver: data.receiver, - denom: 
data.token.denom, - amount: data.token.amount, - acknowledgement: acknowledgement.clone(), - }; - output.emit(ack_event.into()); - output.emit(AckStatusEvent { acknowledgement }.into()); + let ack_event = AckEvent { + receiver: data.receiver, + denom: data.token.denom, + amount: data.token.amount, + acknowledgement: acknowledgement.clone(), + }; + output.emit(ack_event.into()); + output.emit(AckStatusEvent { acknowledgement }.into()); - Ok(()) + Ok(()) } pub fn on_timeout_packet( - ctx: &mut impl Ics20Context, - output: &mut ModuleOutputBuilder, - packet: &Packet, - _relayer: &Signer, + ctx: &mut impl Ics20Context, + output: &mut ModuleOutputBuilder, + packet: &Packet, + _relayer: &Signer, ) -> Result<(), Ics20Error> { - let data = serde_json::from_slice::(&packet.data) - .map_err(|_| Ics20Error::packet_data_deserialization())?; + let data = serde_json::from_slice::(&packet.data) + .map_err(|_| Ics20Error::packet_data_deserialization())?; - process_timeout_packet(ctx, packet, &data)?; + process_timeout_packet(ctx, packet, &data)?; - let timeout_event = TimeoutEvent { - refund_receiver: data.sender, - refund_denom: data.token.denom, - refund_amount: data.token.amount, - }; - output.emit(timeout_event.into()); + let timeout_event = TimeoutEvent { + refund_receiver: data.sender, + refund_denom: data.token.denom, + refund_amount: data.token.amount, + }; + output.emit(timeout_event.into()); - Ok(()) + Ok(()) } #[cfg(test)] pub(crate) mod test { - use subtle_encoding::bech32; - - use crate::applications::transfer::context::cosmos_adr028_escrow_address; - use crate::applications::transfer::error::Error as Ics20Error; - use crate::applications::transfer::msgs::transfer::MsgTransfer; - use crate::applications::transfer::relay::send_transfer::send_transfer; - use crate::applications::transfer::PrefixedCoin; - use crate::core::ics04_channel::error::Error; - use crate::handler::HandlerOutputBuilder; - use crate::prelude::*; - use crate::test_utils::DummyTransferModule; - - 
pub(crate) fn deliver( - ctx: &mut DummyTransferModule, - output: &mut HandlerOutputBuilder<()>, - msg: MsgTransfer, - ) -> Result<(), Error> { - send_transfer(ctx, output, msg).map_err(|e: Ics20Error| Error::app_module(e.to_string())) - } - - #[test] - fn test_cosmos_escrow_address() { - fn assert_eq_escrow_address(port_id: &str, channel_id: &str, address: &str) { - let port_id = port_id.parse().unwrap(); - let channel_id = channel_id.parse().unwrap(); - let gen_address = { - let addr = cosmos_adr028_escrow_address(&port_id, channel_id); - bech32::encode("cosmos", addr) - }; - assert_eq!(gen_address, address.to_owned()) - } - - // addresses obtained using `gaiad query ibc-transfer escrow-address [port-id] [channel-id]` - assert_eq_escrow_address( - "transfer", - "channel-141", - "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", - ); - assert_eq_escrow_address( - "transfer", - "channel-207", - "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", - ); - assert_eq_escrow_address( - "transfer", - "channel-187", - "cosmos177x69sver58mcfs74x6dg0tv6ls4s3xmmcaw53", - ); - } + use std::sync::Mutex; + + use std::sync::Arc; + use subtle_encoding::bech32; + + use crate::{ + applications::transfer::{ + context::cosmos_adr028_escrow_address, error::Error as Ics20Error, + msgs::transfer::MsgTransfer, relay::send_transfer::send_transfer, PrefixedCoin, + }, + core::ics04_channel::error::Error, + handler::HandlerOutputBuilder, + mock::context::{HostBlockType, MockClientTypes, MockIbcStore}, + prelude::*, + test_utils::DummyTransferModule, + }; + + pub(crate) fn deliver( + ctx: &mut DummyTransferModule, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, + ) -> Result<(), Error> { + send_transfer(ctx, output, msg).map_err(|e: Ics20Error| Error::app_module(e.to_string())) + } + + #[test] + fn test_cosmos_escrow_address() { + fn assert_eq_escrow_address(port_id: &str, channel_id: &str, address: &str) { + let port_id = port_id.parse().unwrap(); + let channel_id = 
channel_id.parse().unwrap(); + let gen_address = { + let ibc_store = MockIbcStore::::default(); + let ctx = DummyTransferModule::new(Arc::new(Mutex::new(ibc_store))); + let addr = cosmos_adr028_escrow_address(&ctx, &port_id, channel_id); + bech32::encode("cosmos", addr) + }; + assert_eq!(gen_address, address.to_owned()) + } + + // addresses obtained using `gaiad query ibc-transfer escrow-address [port-id] [channel-id]` + assert_eq_escrow_address( + "transfer", + "channel-141", + "cosmos1x54ltnyg88k0ejmk8ytwrhd3ltm84xehrnlslf", + ); + assert_eq_escrow_address( + "transfer", + "channel-207", + "cosmos1ju6tlfclulxumtt2kglvnxduj5d93a64r5czge", + ); + assert_eq_escrow_address( + "transfer", + "channel-187", + "cosmos177x69sver58mcfs74x6dg0tv6ls4s3xmmcaw53", + ); + } } diff --git a/modules/src/applications/transfer/denom.rs b/modules/src/applications/transfer/denom.rs index 998f731a40..ba9d35c7f0 100644 --- a/modules/src/applications/transfer/denom.rs +++ b/modules/src/applications/transfer/denom.rs @@ -1,16 +1,19 @@ -use core::fmt; -use core::str::FromStr; +use core::{fmt, str::FromStr}; use derive_more::{Display, From, Into}; -use ibc_proto::cosmos::base::v1beta1::Coin as RawCoin; -use ibc_proto::ibc::applications::transfer::v1::DenomTrace as RawDenomTrace; +use ibc_proto::{ + cosmos::base::v1beta1::Coin as RawCoin, + ibc::applications::transfer::v1::DenomTrace as RawDenomTrace, +}; use serde::{Deserialize, Serialize}; use super::error::Error; -use crate::bigint::U256; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::serializers::serde_string; +use crate::{ + bigint::U256, + core::ics24_host::identifier::{ChannelId, PortId}, + prelude::*, + serializers::serde_string, +}; /// A `Coin` type with fully qualified `PrefixedDenom`. 
pub type PrefixedCoin = Coin; @@ -23,37 +26,40 @@ pub type BaseCoin = Coin; #[serde(transparent)] pub struct BaseDenom(String); +impl BaseDenom { + pub fn as_str(&self) -> &str { + &self.0 + } +} + impl FromStr for BaseDenom { - type Err = Error; - - fn from_str(s: &str) -> Result { - if s.trim().is_empty() { - Err(Error::empty_base_denom()) - } else { - Ok(BaseDenom(s.to_owned())) - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + if s.trim().is_empty() { + Err(Error::empty_base_denom()) + } else { + Ok(BaseDenom(s.to_owned())) + } + } } #[derive(Clone, Debug, Ord, PartialOrd, Eq, PartialEq)] pub struct TracePrefix { - port_id: PortId, - channel_id: ChannelId, + port_id: PortId, + channel_id: ChannelId, } impl TracePrefix { - pub fn new(port_id: PortId, channel_id: ChannelId) -> Self { - Self { - port_id, - channel_id, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId) -> Self { + Self { port_id, channel_id } + } } impl fmt::Display for TracePrefix { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.port_id, self.channel_id) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.port_id, self.channel_id) + } } /// A full trace path modelled as a collection of `TracePrefix`s. @@ -64,103 +70,109 @@ impl fmt::Display for TracePrefix { pub struct TracePath(Vec); impl TracePath { - /// Returns true iff this path starts with the specified prefix - pub fn starts_with(&self, prefix: &TracePrefix) -> bool { - self.0.last().map(|p| p == prefix).unwrap_or(false) - } - - /// Removes the specified prefix from the path if there is a match, otherwise does nothing. - pub fn remove_prefix(&mut self, prefix: &TracePrefix) { - if self.starts_with(prefix) { - self.0.pop(); - } - } - - /// Adds the specified prefix to the path. - pub fn add_prefix(&mut self, prefix: TracePrefix) { - self.0.push(prefix) - } - - /// Returns true if the path is empty and false otherwise. 
- pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + /// Returns true iff this path starts with the specified prefix + pub fn starts_with(&self, prefix: &TracePrefix) -> bool { + self.0.last().map(|p| p == prefix).unwrap_or(false) + } + + /// Removes the specified prefix from the path if there is a match, otherwise does nothing. + pub fn remove_prefix(&mut self, prefix: &TracePrefix) { + if self.starts_with(prefix) { + self.0.pop(); + } + } + + /// Adds the specified prefix to the path. + pub fn add_prefix(&mut self, prefix: TracePrefix) { + self.0.push(prefix) + } + + /// Returns true if the path is empty and false otherwise. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } } impl<'a> TryFrom> for TracePath { - type Error = Error; - - fn try_from(v: Vec<&'a str>) -> Result { - if v.len() % 2 != 0 { - return Err(Error::invalid_trace_length(v.len())); - } - - let mut trace = vec![]; - let id_pairs = v.chunks_exact(2).map(|paths| (paths[0], paths[1])); - for (pos, (port_id, channel_id)) in id_pairs.rev().enumerate() { - let port_id = - PortId::from_str(port_id).map_err(|e| Error::invalid_trace_port_id(pos, e))?; - let channel_id = ChannelId::from_str(channel_id) - .map_err(|e| Error::invalid_trace_channel_id(pos, e))?; - trace.push(TracePrefix { - port_id, - channel_id, - }); - } - - Ok(trace.into()) - } + type Error = Error; + + fn try_from(v: Vec<&'a str>) -> Result { + if v.len() % 2 != 0 { + return Err(Error::invalid_trace_length(v.len())) + } + + let mut trace = vec![]; + let id_pairs = v.chunks_exact(2).map(|paths| (paths[0], paths[1])); + for (pos, (port_id, channel_id)) in id_pairs.rev().enumerate() { + let port_id = + PortId::from_str(port_id).map_err(|e| Error::invalid_trace_port_id(pos, e))?; + let channel_id = ChannelId::from_str(channel_id) + .map_err(|e| Error::invalid_trace_channel_id(pos, e))?; + trace.push(TracePrefix { port_id, channel_id }); + } + + Ok(trace.into()) + } } impl FromStr for TracePath { - type Err = Error; - - fn 
from_str(s: &str) -> Result { - let parts = { - let parts: Vec<&str> = s.split('/').collect(); - if parts.len() == 1 && parts[0].trim().is_empty() { - vec![] - } else { - parts - } - }; - parts.try_into() - } + type Err = Error; + + fn from_str(s: &str) -> Result { + let parts = { + let parts: Vec<&str> = s.split('/').collect(); + if parts.len() == 1 && parts[0].trim().is_empty() { + vec![] + } else { + parts + } + }; + parts.try_into() + } } impl fmt::Display for TracePath { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let path = self - .0 - .iter() - .rev() - .map(|prefix| prefix.to_string()) - .collect::>() - .join("/"); - write!(f, "{}", path) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let path = self + .0 + .iter() + .rev() + .map(|prefix| prefix.to_string()) + .collect::>() + .join("/"); + write!(f, "{}", path) + } } /// A type that contains the base denomination for ICS20 and the source tracing information path. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct PrefixedDenom { - /// A series of `{port-id}/{channel-id}`s for tracing the source of the token. - #[serde(with = "serde_string")] - trace_path: TracePath, - /// Base denomination of the relayed fungible token. - base_denom: BaseDenom, + /// A series of `{port-id}/{channel-id}`s for tracing the source of the token. + #[serde(with = "serde_string")] + trace_path: TracePath, + /// Base denomination of the relayed fungible token. + base_denom: BaseDenom, } impl PrefixedDenom { - /// Removes the specified prefix from the trace path if there is a match, otherwise does nothing. - pub fn remove_trace_prefix(&mut self, prefix: &TracePrefix) { - self.trace_path.remove_prefix(prefix) - } - - /// Adds the specified prefix to the trace path. 
- pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { - self.trace_path.add_prefix(prefix) - } + /// Removes the specified prefix from the trace path if there is a match, otherwise does + /// nothing. + pub fn remove_trace_prefix(&mut self, prefix: &TracePrefix) { + self.trace_path.remove_prefix(prefix) + } + + /// Adds the specified prefix to the trace path. + pub fn add_trace_prefix(&mut self, prefix: TracePrefix) { + self.trace_path.add_prefix(prefix) + } + + pub fn trace_path(&self) -> &TracePath { + &self.trace_path + } + + pub fn base_denom(&self) -> &BaseDenom { + &self.base_denom + } } /// Returns true if the denomination originally came from the sender chain and @@ -188,286 +200,256 @@ impl PrefixedDenom { /// created by the chain at the other end of A's port "transfer" and channel /// "someOtherChannel". pub fn is_sender_chain_source( - source_port: PortId, - source_channel: ChannelId, - denom: &PrefixedDenom, + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, ) -> bool { - !is_receiver_chain_source(source_port, source_channel, denom) + !is_receiver_chain_source(source_port, source_channel, denom) } /// Returns true if the denomination originally came from the receiving chain and false otherwise. pub fn is_receiver_chain_source( - source_port: PortId, - source_channel: ChannelId, - denom: &PrefixedDenom, + source_port: PortId, + source_channel: ChannelId, + denom: &PrefixedDenom, ) -> bool { - // For example, let - // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) - // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) - // - // If B had originally sent the token in a previous tranfer, then A would have stored the token as - // "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of the token, - // we need to check if the token starts with "transfer/c2b". 
- let prefix = TracePrefix::new(source_port, source_channel); - denom.trace_path.starts_with(&prefix) + // For example, let + // A: sender chain in this transfer, port "transfer" and channel "c2b" (to B) + // B: receiver chain in this transfer, port "transfer" and channel "c2a" (to A) + // + // If B had originally sent the token in a previous tranfer, then A would have stored the token + // as "transfer/c2b/{token_denom}". Now, A is sending to B, so to check if B is the source of + // the token, we need to check if the token starts with "transfer/c2b". + let prefix = TracePrefix::new(source_port, source_channel); + denom.trace_path.starts_with(&prefix) } impl FromStr for PrefixedDenom { - type Err = Error; - - fn from_str(s: &str) -> Result { - let mut parts: Vec<&str> = s.split('/').collect(); - let last_part = parts.pop().expect("split() returned an empty iterator"); - - let (base_denom, trace_path) = { - if last_part == s { - (BaseDenom::from_str(s)?, TracePath::default()) - } else { - let base_denom = BaseDenom::from_str(last_part)?; - let trace_path = TracePath::try_from(parts)?; - (base_denom, trace_path) - } - }; - - Ok(Self { - trace_path, - base_denom, - }) - } + type Err = Error; + + fn from_str(s: &str) -> Result { + let mut parts: Vec<&str> = s.split('/').collect(); + let last_part = parts.pop().expect("split() returned an empty iterator"); + + let (base_denom, trace_path) = { + if last_part == s { + (BaseDenom::from_str(s)?, TracePath::default()) + } else { + let base_denom = BaseDenom::from_str(last_part)?; + let trace_path = TracePath::try_from(parts)?; + (base_denom, trace_path) + } + }; + + Ok(Self { trace_path, base_denom }) + } } impl TryFrom for PrefixedDenom { - type Error = Error; - - fn try_from(value: RawDenomTrace) -> Result { - let base_denom = BaseDenom::from_str(&value.base_denom)?; - let trace_path = TracePath::from_str(&value.path)?; - Ok(Self { - trace_path, - base_denom, - }) - } + type Error = Error; + + fn try_from(value: 
RawDenomTrace) -> Result { + let base_denom = BaseDenom::from_str(&value.base_denom)?; + let trace_path = TracePath::from_str(&value.path)?; + Ok(Self { trace_path, base_denom }) + } } impl From for RawDenomTrace { - fn from(value: PrefixedDenom) -> Self { - Self { - path: value.trace_path.to_string(), - base_denom: value.base_denom.to_string(), - } - } + fn from(value: PrefixedDenom) -> Self { + Self { path: value.trace_path.to_string(), base_denom: value.base_denom.to_string() } + } } impl From for PrefixedDenom { - fn from(denom: BaseDenom) -> Self { - Self { - trace_path: Default::default(), - base_denom: denom, - } - } + fn from(denom: BaseDenom) -> Self { + Self { trace_path: Default::default(), base_denom: denom } + } } impl fmt::Display for PrefixedDenom { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.trace_path.0.is_empty() { - write!(f, "{}", self.base_denom) - } else { - write!(f, "{}/{}", self.trace_path, self.base_denom) - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.trace_path.0.is_empty() { + write!(f, "{}", self.base_denom) + } else { + write!(f, "{}/{}", self.trace_path, self.base_denom) + } + } } /// A type for representing token transfer amounts. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Display, From, Into)] +#[derive( + Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize, Display, From, Into, +)] pub struct Amount(U256); impl Amount { - pub fn checked_add(self, rhs: Self) -> Option { - self.0.checked_add(rhs.0).map(Self) - } + pub fn checked_add(self, rhs: Self) -> Option { + self.0.checked_add(rhs.0).map(Self) + } + + pub fn checked_sub(self, rhs: Self) -> Option { + self.0.checked_sub(rhs.0).map(Self) + } - pub fn checked_sub(self, rhs: Self) -> Option { - self.0.checked_sub(rhs.0).map(Self) - } + pub fn as_u256(&self) -> U256 { + self.0 + } } impl FromStr for Amount { - type Err = Error; + type Err = Error; - fn from_str(s: &str) -> Result { - let amount = U256::from_str_radix(s, 10).map_err(Error::invalid_amount)?; - Ok(Self(amount)) - } + fn from_str(s: &str) -> Result { + let amount = U256::from_str_radix(s, 10).map_err(Error::invalid_amount)?; + Ok(Self(amount)) + } } impl From for Amount { - fn from(v: u64) -> Self { - Self(v.into()) - } + fn from(v: u64) -> Self { + Self(v.into()) + } } /// Coin defines a token with a denomination and an amount. 
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Serialize, Deserialize)] pub struct Coin { - /// Denomination - pub denom: D, - /// Amount - #[serde(with = "serde_string")] - pub amount: Amount, + /// Denomination + pub denom: D, + /// Amount + #[serde(with = "serde_string")] + pub amount: Amount, } impl TryFrom for Coin where - Error: From<::Err>, + Error: From<::Err>, { - type Error = Error; + type Error = Error; - fn try_from(proto: RawCoin) -> Result, Self::Error> { - let denom = D::from_str(&proto.denom)?; - let amount = Amount::from_str(&proto.amount)?; - Ok(Self { denom, amount }) - } + fn try_from(proto: RawCoin) -> Result, Self::Error> { + let denom = D::from_str(&proto.denom)?; + let amount = Amount::from_str(&proto.amount)?; + Ok(Self { denom, amount }) + } } impl From> for RawCoin { - fn from(coin: Coin) -> RawCoin { - RawCoin { - denom: coin.denom.to_string(), - amount: coin.amount.to_string(), - } - } + fn from(coin: Coin) -> RawCoin { + RawCoin { denom: coin.denom.to_string(), amount: coin.amount.to_string() } + } } impl From for PrefixedCoin { - fn from(coin: BaseCoin) -> PrefixedCoin { - PrefixedCoin { - denom: coin.denom.into(), - amount: coin.amount, - } - } + fn from(coin: BaseCoin) -> PrefixedCoin { + PrefixedCoin { denom: coin.denom.into(), amount: coin.amount } + } } impl fmt::Display for PrefixedCoin { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}", self.amount, self.denom) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}-{}", self.amount, self.denom) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn test_denom_validation() -> Result<(), Error> { - assert!(BaseDenom::from_str("").is_err(), "empty base denom"); - assert!(BaseDenom::from_str("uatom").is_ok(), "valid base denom"); - assert!(PrefixedDenom::from_str("").is_err(), "empty denom trace"); - assert!( - PrefixedDenom::from_str("transfer/channel-0/").is_err(), - "empty base denom with trace" - ); - 
assert!(PrefixedDenom::from_str("/uatom").is_err(), "empty prefix"); - assert!(PrefixedDenom::from_str("//uatom").is_err(), "empty ids"); - assert!( - PrefixedDenom::from_str("transfer/").is_err(), - "single trace" - ); - assert!( - PrefixedDenom::from_str("transfer/atom").is_err(), - "single trace with base denom" - ); - assert!( - PrefixedDenom::from_str("transfer/channel-0/uatom").is_ok(), - "valid single trace info" - ); - assert!( - PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom").is_ok(), - "valid multiple trace info" - ); - assert!( - PrefixedDenom::from_str("(transfer)/channel-0/uatom").is_err(), - "invalid port" - ); - assert!( - PrefixedDenom::from_str("transfer/(channel-0)/uatom").is_err(), - "invalid channel" - ); - - Ok(()) - } - - #[test] - fn test_denom_trace() -> Result<(), Error> { - assert_eq!( - PrefixedDenom::from_str("transfer/channel-0/uatom")?, - PrefixedDenom { - trace_path: "transfer/channel-0".parse()?, - base_denom: "uatom".parse()? - }, - "valid single trace info" - ); - assert_eq!( - PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom")?, - PrefixedDenom { - trace_path: "transfer/channel-0/transfer/channel-1".parse()?, - base_denom: "uatom".parse()? 
- }, - "valid multiple trace info" - ); - - Ok(()) - } - - #[test] - fn test_denom_serde() -> Result<(), Error> { - let dt_str = "transfer/channel-0/uatom"; - let dt = PrefixedDenom::from_str(dt_str)?; - assert_eq!(dt.to_string(), dt_str, "valid single trace info"); - - let dt_str = "transfer/channel-0/transfer/channel-1/uatom"; - let dt = PrefixedDenom::from_str(dt_str)?; - assert_eq!(dt.to_string(), dt_str, "valid multiple trace info"); - - Ok(()) - } - - #[test] - fn test_trace_path() -> Result<(), Error> { - assert!(TracePath::from_str("").is_ok(), "empty trace path"); - assert!( - TracePath::from_str("transfer/uatom").is_err(), - "invalid trace path: bad ChannelId" - ); - assert!( - TracePath::from_str("transfer//uatom").is_err(), - "malformed trace path: missing ChannelId" - ); - assert!( - TracePath::from_str("transfer/channel-0/").is_err(), - "malformed trace path: trailing delimiter" - ); - - let prefix_1 = TracePrefix::new("transfer".parse().unwrap(), "channel-1".parse().unwrap()); - let prefix_2 = TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap()); - let mut trace_path = TracePath(vec![prefix_1.clone()]); - - trace_path.add_prefix(prefix_2.clone()); - assert_eq!( - TracePath::from_str("transfer/channel-0/transfer/channel-1")?, - trace_path - ); - assert_eq!( - TracePath(vec![prefix_1.clone(), prefix_2.clone()]), - trace_path - ); - - trace_path.remove_prefix(&prefix_2); - assert_eq!(TracePath::from_str("transfer/channel-1")?, trace_path); - assert_eq!(TracePath(vec![prefix_1.clone()]), trace_path); - - trace_path.remove_prefix(&prefix_1); - assert!(trace_path.is_empty()); - - Ok(()) - } + use super::*; + + #[test] + fn test_denom_validation() -> Result<(), Error> { + assert!(BaseDenom::from_str("").is_err(), "empty base denom"); + assert!(BaseDenom::from_str("uatom").is_ok(), "valid base denom"); + assert!(PrefixedDenom::from_str("").is_err(), "empty denom trace"); + assert!( + 
PrefixedDenom::from_str("transfer/channel-0/").is_err(), + "empty base denom with trace" + ); + assert!(PrefixedDenom::from_str("/uatom").is_err(), "empty prefix"); + assert!(PrefixedDenom::from_str("//uatom").is_err(), "empty ids"); + assert!(PrefixedDenom::from_str("transfer/").is_err(), "single trace"); + assert!(PrefixedDenom::from_str("transfer/atom").is_err(), "single trace with base denom"); + assert!( + PrefixedDenom::from_str("transfer/channel-0/uatom").is_ok(), + "valid single trace info" + ); + assert!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom").is_ok(), + "valid multiple trace info" + ); + assert!(PrefixedDenom::from_str("(transfer)/channel-0/uatom").is_err(), "invalid port"); + assert!(PrefixedDenom::from_str("transfer/(channel-0)/uatom").is_err(), "invalid channel"); + + Ok(()) + } + + #[test] + fn test_denom_trace() -> Result<(), Error> { + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0".parse()?, + base_denom: "uatom".parse()? + }, + "valid single trace info" + ); + assert_eq!( + PrefixedDenom::from_str("transfer/channel-0/transfer/channel-1/uatom")?, + PrefixedDenom { + trace_path: "transfer/channel-0/transfer/channel-1".parse()?, + base_denom: "uatom".parse()? 
+ }, + "valid multiple trace info" + ); + + Ok(()) + } + + #[test] + fn test_denom_serde() -> Result<(), Error> { + let dt_str = "transfer/channel-0/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid single trace info"); + + let dt_str = "transfer/channel-0/transfer/channel-1/uatom"; + let dt = PrefixedDenom::from_str(dt_str)?; + assert_eq!(dt.to_string(), dt_str, "valid multiple trace info"); + + Ok(()) + } + + #[test] + fn test_trace_path() -> Result<(), Error> { + assert!(TracePath::from_str("").is_ok(), "empty trace path"); + assert!( + TracePath::from_str("transfer/uatom").is_err(), + "invalid trace path: bad ChannelId" + ); + assert!( + TracePath::from_str("transfer//uatom").is_err(), + "malformed trace path: missing ChannelId" + ); + assert!( + TracePath::from_str("transfer/channel-0/").is_err(), + "malformed trace path: trailing delimiter" + ); + + let prefix_1 = TracePrefix::new("transfer".parse().unwrap(), "channel-1".parse().unwrap()); + let prefix_2 = TracePrefix::new("transfer".parse().unwrap(), "channel-0".parse().unwrap()); + let mut trace_path = TracePath(vec![prefix_1.clone()]); + + trace_path.add_prefix(prefix_2.clone()); + assert_eq!(TracePath::from_str("transfer/channel-0/transfer/channel-1")?, trace_path); + assert_eq!(TracePath(vec![prefix_1.clone(), prefix_2.clone()]), trace_path); + + trace_path.remove_prefix(&prefix_2); + assert_eq!(TracePath::from_str("transfer/channel-1")?, trace_path); + assert_eq!(TracePath(vec![prefix_1.clone()]), trace_path); + + trace_path.remove_prefix(&prefix_1); + assert!(trace_path.is_empty()); + + Ok(()) + } } diff --git a/modules/src/applications/transfer/error.rs b/modules/src/applications/transfer/error.rs index c9f71fdc33..edea8e86fd 100644 --- a/modules/src/applications/transfer/error.rs +++ b/modules/src/applications/transfer/error.rs @@ -5,136 +5,140 @@ use subtle_encoding::Error as EncodingError; use tendermint_proto::Error as TendermintProtoError; use 
uint::FromStrRadixErr; -use crate::core::ics04_channel::channel::Order; -use crate::core::ics04_channel::error as channel_error; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::signer::SignerError; +use crate::{ + core::{ + ics04_channel::{channel::Order, error as channel_error, Version}, + ics24_host::{ + error::ValidationError, + identifier::{ChannelId, PortId}, + }, + }, + prelude::*, + signer::SignerError, +}; define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - UnknowMessageTypeUrl - { url: String } - | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, + #[derive(Debug, PartialEq, Eq)] + Error { + UnknowMessageTypeUrl + { url: String } + | e | { format_args!("unrecognized ICS-20 transfer message type URL {0}", e.url) }, - Ics04Channel - [ channel_error::Error ] - |_ | { "Ics04 channel error" }, + Ics04Channel + [ channel_error::Error ] + |_ | { "Ics04 channel error" }, - DestinationChannelNotFound - { port_id: PortId, channel_id: ChannelId } - | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, + DestinationChannelNotFound + { port_id: PortId, channel_id: ChannelId } + | e | { format_args!("destination channel not found in the counterparty of port_id {0} and channel_id {1} ", e.port_id, e.channel_id) }, - InvalidPortId - { context: String } - [ ValidationError ] - | _ | { "invalid port identifier" }, + InvalidPortId + { context: String } + [ ValidationError ] + | _ | { "invalid port identifier" }, - InvalidChannelId - { context: String } - [ ValidationError ] - | _ | { "invalid channel identifier" }, + InvalidChannelId + { context: String } + [ ValidationError ] + | _ | { "invalid channel identifier" }, - InvalidPacketTimeoutHeight - { context: String } - | _ | { "invalid packet timeout 
height value" }, + InvalidPacketTimeoutHeight + { context: String } + | _ | { "invalid packet timeout height value" }, - InvalidPacketTimeoutTimestamp - { timestamp: u64 } - | _ | { "invalid packet timeout timestamp value" }, + InvalidPacketTimeoutTimestamp + { timestamp: u64 } + | _ | { "invalid packet timeout timestamp value" }, - Utf8 - [ DisplayOnly ] - | _ | { "utf8 decoding error" }, + Utf8 + [ DisplayOnly ] + | _ | { "utf8 decoding error" }, - EmptyBaseDenom - |_| { "base denomination is empty" }, + EmptyBaseDenom + |_| { "base denomination is empty" }, - InvalidTracePortId - { pos: usize } - [ ValidationError ] - | e | { format_args!("invalid port id in trace at position: {0}", e.pos) }, + InvalidTracePortId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid port id in trace at position: {0}", e.pos) }, - InvalidTraceChannelId - { pos: usize } - [ ValidationError ] - | e | { format_args!("invalid channel id in trace at position: {0}", e.pos) }, + InvalidTraceChannelId + { pos: usize } + [ ValidationError ] + | e | { format_args!("invalid channel id in trace at position: {0}", e.pos) }, - InvalidTraceLength - { len: usize } - | e | { format_args!("trace length must be even but got: {0}", e.len) }, + InvalidTraceLength + { len: usize } + | e | { format_args!("trace length must be even but got: {0}", e.len) }, - InvalidAmount - [ TraceError ] - | _ | { "invalid amount" }, + InvalidAmount + [ TraceError ] + | _ | { "invalid amount" }, - InvalidToken - | _ | { "invalid token" }, + InvalidToken + | _ | { "invalid token" }, - Signer - [ SignerError ] - | _ | { "failed to parse signer" }, + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, - MissingDenomIbcPrefix - | _ | { "missing 'ibc/' prefix in denomination" }, + MissingDenomIbcPrefix + | _ | { "missing 'ibc/' prefix in denomination" }, - MalformedHashDenom - | _ | { "hashed denom must be of the form 'ibc/{Hash}'" }, + MalformedHashDenom + | _ | { "hashed denom must be of the 
form 'ibc/{Hash}'" }, - ParseHex - [ TraceError ] - | _ | { "invalid hex string" }, + ParseHex + [ TraceError ] + | _ | { "invalid hex string" }, - ChanSeqExceedsLimit - { sequence: u64 } - | e | { format_args!("channel sequence ({0}) exceeds limit of {1}", e.sequence, u32::MAX) }, + ChanSeqExceedsLimit + { sequence: u64 } + | e | { format_args!("channel sequence ({0}) exceeds limit of {1}", e.sequence, u32::MAX) }, - ChannelNotUnordered - { order: Order } - | e | { format_args!("expected '{0}' channel, got '{1}'", Order::Unordered, e.order) }, + ChannelNotUnordered + { order: Order } + | e | { format_args!("expected '{0}' channel, got '{1}'", Order::Unordered, e.order) }, - InvalidVersion - { version: Version } - | e | { format_args!("expected version '{0}', got '{1}'", Version::ics20(), e.version) }, + InvalidVersion + { version: Version } + | e | { format_args!("expected version '{0}', got '{1}'", Version::ics20(), e.version) }, - InvalidCounterpartyVersion - { version: Version } - | e | { format_args!("expected counterparty version '{0}', got '{1}'", Version::ics20(), e.version) }, + InvalidCounterpartyVersion + { version: Version } + | e | { format_args!("expected counterparty version '{0}', got '{1}'", Version::ics20(), e.version) }, - CantCloseChannel - | _ | { "channel cannot be closed" }, + CantCloseChannel + | _ | { "channel cannot be closed" }, - PacketDataDeserialization - | _ | { "failed to deserialize packet data" }, + PacketDataDeserialization + | _ | { "failed to deserialize packet data" }, - AckDeserialization - | _ | { "failed to deserialize acknowledgement" }, + AckDeserialization + | _ | { "failed to deserialize acknowledgement" }, - ReceiveDisabled - | _ | { "receive is not enabled" }, + ReceiveDisabled + | _ | { "receive is not enabled" }, - SendDisabled - | _ | { "send is not enabled" }, + SendDisabled + | _ | { "send is not enabled" }, - ParseAccountFailure - | _ | { "failed to parse as AccountId" }, + ParseAccountFailure + | _ | { "failed 
to parse as AccountId" }, - InvalidPort - { port_id: PortId, exp_port_id: PortId } - | e | { format_args!("invalid port: '{0}', expected '{1}'", e.port_id, e.exp_port_id) }, + InvalidPort + { port_id: PortId, exp_port_id: PortId } + | e | { format_args!("invalid port: '{0}', expected '{1}'", e.port_id, e.exp_port_id) }, - TraceNotFound - | _ | { "no trace associated with specified hash" }, + TraceNotFound + | _ | { "no trace associated with specified hash" }, - DecodeRawMsg - [ TraceError ] - | _ | { "error decoding raw msg" }, + DecodeRawMsg + [ TraceError ] + | _ | { "error decoding raw msg" }, - UnknownMsgType - { msg_type: String } - | e | { format_args!("unknown msg type: {0}", e.msg_type) }, - } + UnknownMsgType + { msg_type: String } + | e | { format_args!("unknown msg type: {0}", e.msg_type) }, + } } diff --git a/modules/src/applications/transfer/events.rs b/modules/src/applications/transfer/events.rs index 590638f448..546fc19b70 100644 --- a/modules/src/applications/transfer/events.rs +++ b/modules/src/applications/transfer/events.rs @@ -1,8 +1,11 @@ -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::{Amount, PrefixedDenom, MODULE_ID_STR}; -use crate::events::ModuleEvent; -use crate::prelude::*; -use crate::signer::Signer; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, Amount, PrefixedDenom, MODULE_ID_STR, + }, + events::ModuleEvent, + prelude::*, + signer::Signer, +}; const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; const EVENT_TYPE_TIMEOUT: &str = "timeout"; @@ -10,163 +13,147 @@ const EVENT_TYPE_DENOM_TRACE: &str = "denomination_trace"; const EVENT_TYPE_TRANSFER: &str = "ibc_transfer"; pub enum Event { - Recv(RecvEvent), - Ack(AckEvent), - AckStatus(AckStatusEvent), - Timeout(TimeoutEvent), - DenomTrace(DenomTraceEvent), - Transfer(TransferEvent), + Recv(RecvEvent), + Ack(AckEvent), + AckStatus(AckStatusEvent), + Timeout(TimeoutEvent), + 
DenomTrace(DenomTraceEvent), + Transfer(TransferEvent), } pub struct RecvEvent { - pub receiver: Signer, - pub denom: PrefixedDenom, - pub amount: Amount, - pub success: bool, + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub success: bool, } impl From for ModuleEvent { - fn from(ev: RecvEvent) -> Self { - let RecvEvent { - receiver, - denom, - amount, - success, - } = ev; - Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("receiver", receiver).into(), - ("denom", denom).into(), - ("amount", amount).into(), - ("success", success).into(), - ], - } - } + fn from(ev: RecvEvent) -> Self { + let RecvEvent { receiver, denom, amount, success } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), + ("amount", amount).into(), + ("success", success).into(), + ], + } + } } pub struct AckEvent { - pub receiver: Signer, - pub denom: PrefixedDenom, - pub amount: Amount, - pub acknowledgement: Acknowledgement, + pub receiver: Signer, + pub denom: PrefixedDenom, + pub amount: Amount, + pub acknowledgement: Acknowledgement, } impl From for ModuleEvent { - fn from(ev: AckEvent) -> Self { - let AckEvent { - receiver, - denom, - amount, - acknowledgement, - } = ev; - Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("receiver", receiver).into(), - ("denom", denom).into(), - ("amount", amount).into(), - ("acknowledgement", acknowledgement).into(), - ], - } - } + fn from(ev: AckEvent) -> Self { + let AckEvent { receiver, denom, amount, acknowledgement } = ev; + Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("receiver", receiver).into(), + ("denom", denom).into(), 
+ ("amount", amount).into(), + ("acknowledgement", acknowledgement).into(), + ], + } + } } pub struct AckStatusEvent { - pub acknowledgement: Acknowledgement, + pub acknowledgement: Acknowledgement, } impl From for ModuleEvent { - fn from(ev: AckStatusEvent) -> Self { - let AckStatusEvent { acknowledgement } = ev; - let mut event = Self { - kind: EVENT_TYPE_PACKET.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![], - }; - let attr_label = match acknowledgement { - Acknowledgement::Success(_) => "success", - Acknowledgement::Error(_) => "error", - }; - event - .attributes - .push((attr_label, acknowledgement.to_string()).into()); - event - } + fn from(ev: AckStatusEvent) -> Self { + let AckStatusEvent { acknowledgement } = ev; + let mut event = Self { + kind: EVENT_TYPE_PACKET.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![], + }; + let attr_label = match acknowledgement { + Acknowledgement::Success(_) => "success", + Acknowledgement::Error(_) => "error", + }; + event.attributes.push((attr_label, acknowledgement.to_string()).into()); + event + } } pub struct TimeoutEvent { - pub refund_receiver: Signer, - pub refund_denom: PrefixedDenom, - pub refund_amount: Amount, + pub refund_receiver: Signer, + pub refund_denom: PrefixedDenom, + pub refund_amount: Amount, } impl From for ModuleEvent { - fn from(ev: TimeoutEvent) -> Self { - let TimeoutEvent { - refund_receiver, - refund_denom, - refund_amount, - } = ev; - Self { - kind: EVENT_TYPE_TIMEOUT.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![ - ("refund_receiver", refund_receiver).into(), - ("refund_denom", refund_denom).into(), - ("refund_amount", refund_amount).into(), - ], - } - } + fn from(ev: TimeoutEvent) -> Self { + let TimeoutEvent { refund_receiver, refund_denom, refund_amount } = ev; + Self { + kind: EVENT_TYPE_TIMEOUT.to_string(), + module_name: 
MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![ + ("refund_receiver", refund_receiver).into(), + ("refund_denom", refund_denom).into(), + ("refund_amount", refund_amount).into(), + ], + } + } } pub struct DenomTraceEvent { - pub trace_hash: Option, - pub denom: PrefixedDenom, + pub trace_hash: Option, + pub denom: PrefixedDenom, } impl From for ModuleEvent { - fn from(ev: DenomTraceEvent) -> Self { - let DenomTraceEvent { trace_hash, denom } = ev; - let mut ev = Self { - kind: EVENT_TYPE_DENOM_TRACE.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![("denom", denom).into()], - }; - if let Some(hash) = trace_hash { - ev.attributes.push(("trace_hash", hash).into()); - } - ev - } + fn from(ev: DenomTraceEvent) -> Self { + let DenomTraceEvent { trace_hash, denom } = ev; + let mut ev = Self { + kind: EVENT_TYPE_DENOM_TRACE.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![("denom", denom).into()], + }; + if let Some(hash) = trace_hash { + ev.attributes.push(("trace_hash", hash).into()); + } + ev + } } pub struct TransferEvent { - pub sender: Signer, - pub receiver: Signer, + pub sender: Signer, + pub receiver: Signer, } impl From for ModuleEvent { - fn from(ev: TransferEvent) -> Self { - let TransferEvent { sender, receiver } = ev; - Self { - kind: EVENT_TYPE_TRANSFER.to_string(), - module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), - attributes: vec![("sender", sender).into(), ("receiver", receiver).into()], - } - } + fn from(ev: TransferEvent) -> Self { + let TransferEvent { sender, receiver } = ev; + Self { + kind: EVENT_TYPE_TRANSFER.to_string(), + module_name: MODULE_ID_STR.parse().expect("invalid ModuleId"), + attributes: vec![("sender", sender).into(), ("receiver", receiver).into()], + } + } } impl From for ModuleEvent { - fn from(ev: Event) -> Self { - match ev { - Event::Recv(ev) => ev.into(), - Event::Ack(ev) => ev.into(), - 
Event::AckStatus(ev) => ev.into(), - Event::Timeout(ev) => ev.into(), - Event::DenomTrace(ev) => ev.into(), - Event::Transfer(ev) => ev.into(), - } - } + fn from(ev: Event) -> Self { + match ev { + Event::Recv(ev) => ev.into(), + Event::Ack(ev) => ev.into(), + Event::AckStatus(ev) => ev.into(), + Event::Timeout(ev) => ev.into(), + Event::DenomTrace(ev) => ev.into(), + Event::Transfer(ev) => ev.into(), + } + } } diff --git a/modules/src/applications/transfer/msgs/transfer.rs b/modules/src/applications/transfer/msgs/transfer.rs index ee9c8afd1f..8d0dd2e8af 100644 --- a/modules/src/applications/transfer/msgs/transfer.rs +++ b/modules/src/applications/transfer/msgs/transfer.rs @@ -2,17 +2,22 @@ use crate::prelude::*; -use ibc_proto::cosmos::base::v1beta1::Coin; -use ibc_proto::google::protobuf::Any; -use ibc_proto::ibc::applications::transfer::v1::MsgTransfer as RawMsgTransfer; +use ibc_proto::{ + cosmos::base::v1beta1::Coin, google::protobuf::Any, + ibc::applications::transfer::v1::MsgTransfer as RawMsgTransfer, +}; use tendermint_proto::Protobuf; -use crate::applications::transfer::error::Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::signer::Signer; -use crate::timestamp::Timestamp; -use crate::tx_msg::Msg; +use crate::{ + applications::transfer::error::Error, + core::{ + ics02_client::height::Height, + ics24_host::identifier::{ChannelId, PortId}, + }, + signer::Signer, + timestamp::Timestamp, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.applications.transfer.v1.MsgTransfer"; @@ -25,141 +30,129 @@ pub const TYPE_URL: &str = "/ibc.applications.transfer.v1.MsgTransfer"; /// let the library figure out how to build the packet properly. 
#[derive(Clone, Debug, PartialEq)] pub struct MsgTransfer { - /// the port on which the packet will be sent - pub source_port: PortId, - /// the channel by which the packet will be sent - pub source_channel: ChannelId, - /// the tokens to be transferred - pub token: C, - /// the sender address - pub sender: Signer, - /// the recipient address on the destination chain - pub receiver: Signer, - /// Timeout height relative to the current block height. - /// The timeout is disabled when set to 0. - pub timeout_height: Height, - /// Timeout timestamp relative to the current block timestamp. - /// The timeout is disabled when set to 0. - pub timeout_timestamp: Timestamp, + /// the port on which the packet will be sent + pub source_port: PortId, + /// the channel by which the packet will be sent + pub source_channel: ChannelId, + /// the tokens to be transferred + pub token: C, + /// the sender address + pub sender: Signer, + /// the recipient address on the destination chain + pub receiver: Signer, + /// Timeout height relative to the current block height. + /// The timeout is disabled when set to 0. + pub timeout_height: Height, + /// Timeout timestamp relative to the current block timestamp. + /// The timeout is disabled when set to 0. 
+ pub timeout_timestamp: Timestamp, } impl Msg for MsgTransfer { - type ValidationError = Error; - type Raw = RawMsgTransfer; + type ValidationError = Error; + type Raw = RawMsgTransfer; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl TryFrom for MsgTransfer { - type Error = Error; - - fn try_from(raw_msg: RawMsgTransfer) -> Result { - let timeout_timestamp = Timestamp::from_nanoseconds(raw_msg.timeout_timestamp) - .map_err(|_| Error::invalid_packet_timeout_timestamp(raw_msg.timeout_timestamp))?; - - let timeout_height = match raw_msg.timeout_height.clone() { - None => Height::zero(), - Some(raw_height) => raw_height.try_into().map_err(|e| { - Error::invalid_packet_timeout_height(format!("invalid timeout height {}", e)) - })?, - }; - - Ok(MsgTransfer { - source_port: raw_msg - .source_port - .parse() - .map_err(|e| Error::invalid_port_id(raw_msg.source_port.clone(), e))?, - source_channel: raw_msg - .source_channel - .parse() - .map_err(|e| Error::invalid_channel_id(raw_msg.source_channel.clone(), e))?, - token: raw_msg.token.ok_or_else(Error::invalid_token)?, - sender: raw_msg.sender.parse().map_err(Error::signer)?, - receiver: raw_msg.receiver.parse().map_err(Error::signer)?, - timeout_height, - timeout_timestamp, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTransfer) -> Result { + let timeout_timestamp = Timestamp::from_nanoseconds(raw_msg.timeout_timestamp) + .map_err(|_| Error::invalid_packet_timeout_timestamp(raw_msg.timeout_timestamp))?; + + let timeout_height = match raw_msg.timeout_height.clone() { + None => Height::zero(), + Some(raw_height) => raw_height.try_into().map_err(|e| { + Error::invalid_packet_timeout_height(format!("invalid timeout height {}", e)) + })?, + }; + + Ok(MsgTransfer { + source_port: raw_msg + 
.source_port + .parse() + .map_err(|e| Error::invalid_port_id(raw_msg.source_port.clone(), e))?, + source_channel: raw_msg + .source_channel + .parse() + .map_err(|e| Error::invalid_channel_id(raw_msg.source_channel.clone(), e))?, + token: raw_msg.token.ok_or_else(Error::invalid_token)?, + sender: raw_msg.sender.parse().map_err(Error::signer)?, + receiver: raw_msg.receiver.parse().map_err(Error::signer)?, + timeout_height, + timeout_timestamp, + }) + } } impl From for RawMsgTransfer { - fn from(domain_msg: MsgTransfer) -> Self { - RawMsgTransfer { - source_port: domain_msg.source_port.to_string(), - source_channel: domain_msg.source_channel.to_string(), - token: Some(domain_msg.token), - sender: domain_msg.sender.to_string(), - receiver: domain_msg.receiver.to_string(), - timeout_height: Some(domain_msg.timeout_height.into()), - timeout_timestamp: domain_msg.timeout_timestamp.nanoseconds(), - } - } + fn from(domain_msg: MsgTransfer) -> Self { + RawMsgTransfer { + source_port: domain_msg.source_port.to_string(), + source_channel: domain_msg.source_channel.to_string(), + token: Some(domain_msg.token), + sender: domain_msg.sender.to_string(), + receiver: domain_msg.receiver.to_string(), + timeout_height: Some(domain_msg.timeout_height.into()), + timeout_timestamp: domain_msg.timeout_timestamp.nanoseconds(), + } + } } impl Protobuf for MsgTransfer {} impl TryFrom for MsgTransfer { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TYPE_URL => MsgTransfer::decode_vec(&raw.value).map_err(Error::decode_raw_msg), - _ => Err(Error::unknown_msg_type(raw.type_url)), - } - } + type Error = Error; + + fn try_from(raw: Any) -> Result { + match raw.type_url.as_str() { + TYPE_URL => MsgTransfer::decode_vec(&raw.value).map_err(Error::decode_raw_msg), + _ => Err(Error::unknown_msg_type(raw.type_url)), + } + } } impl From for Any { - fn from(msg: MsgTransfer) -> Self { - Self { - type_url: TYPE_URL.to_string(), - value: msg - .encode_vec() - 
.expect("encoding to `Any` from `MsgTranfer`"), - } - } + fn from(msg: MsgTransfer) -> Self { + Self { type_url: TYPE_URL.to_string(), value: msg.encode_vec() } + } } #[cfg(test)] pub mod test_util { - use core::ops::Add; - use core::time::Duration; - - use super::MsgTransfer; - use crate::bigint::U256; - use crate::signer::Signer; - use crate::{ - applications::transfer::{BaseCoin, PrefixedCoin}, - core::ics24_host::identifier::{ChannelId, PortId}, - test_utils::get_dummy_bech32_account, - timestamp::Timestamp, - Height, - }; - - // Returns a dummy ICS20 `MsgTransfer`, for testing only! - pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { - let address: Signer = get_dummy_bech32_account().as_str().parse().unwrap(); - MsgTransfer { - source_port: PortId::default(), - source_channel: ChannelId::default(), - token: BaseCoin { - denom: "uatom".parse().unwrap(), - amount: U256::from(10).into(), - } - .into(), - sender: address.clone(), - receiver: address, - timeout_timestamp: Timestamp::now().add(Duration::from_secs(10)).unwrap(), - timeout_height: Height { - revision_number: 0, - revision_height: height, - }, - } - } + use core::{ops::Add, time::Duration}; + + use super::MsgTransfer; + use crate::{ + applications::transfer::{BaseCoin, PrefixedCoin}, + bigint::U256, + core::ics24_host::identifier::{ChannelId, PortId}, + signer::Signer, + test_utils::get_dummy_bech32_account, + timestamp::Timestamp, + Height, + }; + + // Returns a dummy ICS20 `MsgTransfer`, for testing only! 
+ pub fn get_dummy_msg_transfer(height: u64) -> MsgTransfer { + let address: Signer = get_dummy_bech32_account().as_str().parse().unwrap(); + MsgTransfer { + source_port: PortId::default(), + source_channel: ChannelId::default(), + token: BaseCoin { denom: "uatom".parse().unwrap(), amount: U256::from(10).into() } + .into(), + sender: address.clone(), + receiver: address, + timeout_timestamp: Timestamp::now().add(Duration::from_secs(10)).unwrap(), + timeout_height: Height { revision_number: 0, revision_height: height }, + } + } } diff --git a/modules/src/applications/transfer/packet.rs b/modules/src/applications/transfer/packet.rs index 643ee5e465..d706334b3c 100644 --- a/modules/src/applications/transfer/packet.rs +++ b/modules/src/applications/transfer/packet.rs @@ -1,43 +1,41 @@ use alloc::string::ToString; -use core::convert::TryFrom; -use core::str::FromStr; +use core::{convert::TryFrom, str::FromStr}; use ibc_proto::ibc::applications::transfer::v2::FungibleTokenPacketData as RawPacketData; use serde::{Deserialize, Serialize}; -use super::error::Error; -use super::{Amount, PrefixedCoin, PrefixedDenom}; +use super::{error::Error, Amount, PrefixedCoin, PrefixedDenom}; use crate::signer::Signer; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PacketData { - pub token: PrefixedCoin, - pub sender: Signer, - pub receiver: Signer, + pub token: PrefixedCoin, + pub sender: Signer, + pub receiver: Signer, } impl TryFrom for PacketData { - type Error = Error; + type Error = Error; - fn try_from(raw_pkt_data: RawPacketData) -> Result { - // This denom may be prefixed or unprefixed. 
- let denom = PrefixedDenom::from_str(&raw_pkt_data.denom)?; - let amount = Amount::from_str(&raw_pkt_data.amount)?; - Ok(Self { - token: PrefixedCoin { denom, amount }, - sender: raw_pkt_data.sender.parse().map_err(Error::signer)?, - receiver: raw_pkt_data.receiver.parse().map_err(Error::signer)?, - }) - } + fn try_from(raw_pkt_data: RawPacketData) -> Result { + // This denom may be prefixed or unprefixed. + let denom = PrefixedDenom::from_str(&raw_pkt_data.denom)?; + let amount = Amount::from_str(&raw_pkt_data.amount)?; + Ok(Self { + token: PrefixedCoin { denom, amount }, + sender: raw_pkt_data.sender.parse().map_err(Error::signer)?, + receiver: raw_pkt_data.receiver.parse().map_err(Error::signer)?, + }) + } } impl From for RawPacketData { - fn from(pkt_data: PacketData) -> Self { - Self { - denom: pkt_data.token.denom.to_string(), - amount: pkt_data.token.amount.to_string(), - sender: pkt_data.sender.to_string(), - receiver: pkt_data.receiver.to_string(), - } - } + fn from(pkt_data: PacketData) -> Self { + Self { + denom: pkt_data.token.denom.to_string(), + amount: pkt_data.token.amount.to_string(), + sender: pkt_data.sender.to_string(), + receiver: pkt_data.receiver.to_string(), + } + } } diff --git a/modules/src/applications/transfer/relay.rs b/modules/src/applications/transfer/relay.rs index 63fb15103b..934c48afbb 100644 --- a/modules/src/applications/transfer/relay.rs +++ b/modules/src/applications/transfer/relay.rs @@ -1,10 +1,12 @@ //! This module implements the processing logic for ICS20 (token transfer) message. 
-use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::is_sender_chain_source; -use crate::applications::transfer::packet::PacketData; -use crate::core::ics04_channel::packet::Packet; -use crate::prelude::*; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error as Ics20Error, is_sender_chain_source, + packet::PacketData, + }, + core::ics04_channel::packet::Packet, + prelude::*, +}; pub mod on_ack_packet; pub mod on_recv_packet; @@ -12,29 +14,26 @@ pub mod on_timeout_packet; pub mod send_transfer; fn refund_packet_token( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, ) -> Result<(), Ics20Error> { - let sender = data - .sender - .clone() - .try_into() - .map_err(|_| Ics20Error::parse_account_failure())?; + let sender = data + .sender + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; - if is_sender_chain_source( - packet.source_port.clone(), - packet.source_channel, - &data.token.denom, - ) { - // unescrow tokens back to sender - let escrow_address = - ctx.get_channel_escrow_address(&packet.source_port, packet.source_channel)?; + if is_sender_chain_source(packet.source_port.clone(), packet.source_channel, &data.token.denom) + { + // unescrow tokens back to sender + let escrow_address = + ctx.get_channel_escrow_address(&packet.source_port, packet.source_channel)?; - ctx.send_coins(&escrow_address, &sender, &data.token) - } - // mint vouchers back to sender - else { - ctx.mint_coins(&sender, &data.token) - } + ctx.send_coins(&escrow_address, &sender, &data.token) + } + // mint vouchers back to sender + else { + ctx.mint_coins(&sender, &data.token) + } } diff --git a/modules/src/applications/transfer/relay/on_ack_packet.rs b/modules/src/applications/transfer/relay/on_ack_packet.rs index a254de1fc9..5efaba8959 100644 --- 
a/modules/src/applications/transfer/relay/on_ack_packet.rs +++ b/modules/src/applications/transfer/relay/on_ack_packet.rs @@ -1,19 +1,20 @@ -use crate::applications::transfer::acknowledgement::Acknowledgement; -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::refund_packet_token; -use crate::core::ics04_channel::packet::Packet; +use crate::{ + applications::transfer::{ + acknowledgement::Acknowledgement, context::Ics20Context, error::Error as Ics20Error, + packet::PacketData, relay::refund_packet_token, + }, + core::ics04_channel::packet::Packet, +}; pub fn process_ack_packet( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, - ack: &Acknowledgement, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, + ack: &Acknowledgement, ) -> Result<(), Ics20Error> { - if matches!(ack, Acknowledgement::Error(_)) { - refund_packet_token(ctx, packet, data)?; - } + if matches!(ack, Acknowledgement::Error(_)) { + refund_packet_token(ctx, packet, data)?; + } - Ok(()) + Ok(()) } diff --git a/modules/src/applications/transfer/relay/on_recv_packet.rs b/modules/src/applications/transfer/relay/on_recv_packet.rs index f6069ee10a..60a033da96 100644 --- a/modules/src/applications/transfer/relay/on_recv_packet.rs +++ b/modules/src/applications/transfer/relay/on_recv_packet.rs @@ -1,68 +1,70 @@ -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::events::DenomTraceEvent; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::{is_receiver_chain_source, TracePrefix}; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics26_routing::context::{ModuleOutputBuilder, WriteFn}; -use crate::prelude::*; +use crate::{ + 
applications::transfer::{ + context::Ics20Context, error::Error as Ics20Error, events::DenomTraceEvent, + is_receiver_chain_source, packet::PacketData, TracePrefix, + }, + core::{ + ics04_channel::packet::Packet, + ics26_routing::context::{ModuleOutputBuilder, WriteFn}, + }, + prelude::*, +}; pub fn process_recv_packet( - ctx: &Ctx, - output: &mut ModuleOutputBuilder, - packet: &Packet, - data: PacketData, + ctx: &Ctx, + output: &mut ModuleOutputBuilder, + packet: &Packet, + data: PacketData, ) -> Result, Ics20Error> { - if !ctx.is_receive_enabled() { - return Err(Ics20Error::receive_disabled()); - } + if !ctx.is_receive_enabled() { + return Err(Ics20Error::receive_disabled()) + } - let receiver_account = data - .receiver - .clone() - .try_into() - .map_err(|_| Ics20Error::parse_account_failure())?; + let receiver_account = data + .receiver + .clone() + .try_into() + .map_err(|_| Ics20Error::parse_account_failure())?; - if is_receiver_chain_source( - packet.source_port.clone(), - packet.source_channel, - &data.token.denom, - ) { - // sender chain is not the source, unescrow tokens - let prefix = TracePrefix::new(packet.source_port.clone(), packet.source_channel); - let coin = { - let mut c = data.token; - c.denom.remove_trace_prefix(&prefix); - c - }; + if is_receiver_chain_source( + packet.source_port.clone(), + packet.source_channel, + &data.token.denom, + ) { + // sender chain is not the source, unescrow tokens + let prefix = TracePrefix::new(packet.source_port.clone(), packet.source_channel); + let coin = { + let mut c = data.token; + c.denom.remove_trace_prefix(&prefix); + c + }; - let escrow_address = - ctx.get_channel_escrow_address(&packet.destination_port, packet.destination_channel)?; + let escrow_address = + ctx.get_channel_escrow_address(&packet.destination_port, packet.destination_channel)?; - Ok(Box::new(move |ctx| { - let ctx = ctx.downcast_mut::().unwrap(); - ctx.send_coins(&escrow_address, &receiver_account, &coin) - .map_err(|e| e.to_string()) - 
})) - } else { - // sender chain is the source, mint vouchers - let prefix = TracePrefix::new(packet.destination_port.clone(), packet.destination_channel); - let coin = { - let mut c = data.token; - c.denom.add_trace_prefix(prefix); - c - }; + Ok(Box::new(move |ctx| { + let ctx = ctx.downcast_mut::().unwrap(); + ctx.send_coins(&escrow_address, &receiver_account, &coin) + .map_err(|e| e.to_string()) + })) + } else { + // sender chain is the source, mint vouchers + let prefix = TracePrefix::new(packet.destination_port.clone(), packet.destination_channel); + let coin = { + let mut c = data.token; + c.denom.add_trace_prefix(prefix); + c + }; - let denom_trace_event = DenomTraceEvent { - trace_hash: ctx.denom_hash_string(&coin.denom), - denom: coin.denom.clone(), - }; - output.emit(denom_trace_event.into()); + let denom_trace_event = DenomTraceEvent { + trace_hash: ctx.denom_hash_string(&coin.denom), + denom: coin.denom.clone(), + }; + output.emit(denom_trace_event.into()); - Ok(Box::new(move |ctx| { - let ctx = ctx.downcast_mut::().unwrap(); - ctx.mint_coins(&receiver_account, &coin) - .map_err(|e| e.to_string()) - })) - } + Ok(Box::new(move |ctx| { + let ctx = ctx.downcast_mut::().unwrap(); + ctx.mint_coins(&receiver_account, &coin).map_err(|e| e.to_string()) + })) + } } diff --git a/modules/src/applications/transfer/relay/on_timeout_packet.rs b/modules/src/applications/transfer/relay/on_timeout_packet.rs index 192a3dd9b6..8b1f3f12c6 100644 --- a/modules/src/applications/transfer/relay/on_timeout_packet.rs +++ b/modules/src/applications/transfer/relay/on_timeout_packet.rs @@ -1,13 +1,15 @@ -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error as Ics20Error; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::relay::refund_packet_token; -use crate::core::ics04_channel::packet::Packet; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error as 
Ics20Error, packet::PacketData, + relay::refund_packet_token, + }, + core::ics04_channel::packet::Packet, +}; pub fn process_timeout_packet( - ctx: &mut impl Ics20Context, - packet: &Packet, - data: &PacketData, + ctx: &mut impl Ics20Context, + packet: &Packet, + data: &PacketData, ) -> Result<(), Ics20Error> { - refund_packet_token(ctx, packet, data) + refund_packet_token(ctx, packet, data) } diff --git a/modules/src/applications/transfer/relay/send_transfer.rs b/modules/src/applications/transfer/relay/send_transfer.rs index 6925c6776c..37e2d5bd60 100644 --- a/modules/src/applications/transfer/relay/send_transfer.rs +++ b/modules/src/applications/transfer/relay/send_transfer.rs @@ -1,115 +1,89 @@ -use crate::applications::transfer::context::Ics20Context; -use crate::applications::transfer::error::Error; -use crate::applications::transfer::events::TransferEvent; -use crate::applications::transfer::msgs::transfer::MsgTransfer; -use crate::applications::transfer::packet::PacketData; -use crate::applications::transfer::{is_sender_chain_source, Coin, PrefixedCoin}; -use crate::core::ics04_channel::handler::send_packet::send_packet; -use crate::core::ics04_channel::packet::Packet; -use crate::events::ModuleEvent; -use crate::handler::{HandlerOutput, HandlerOutputBuilder}; -use crate::prelude::*; +use crate::{ + applications::transfer::{ + context::Ics20Context, error::Error, events::TransferEvent, is_sender_chain_source, + msgs::transfer::MsgTransfer, packet::PacketData, Coin, PrefixedCoin, + }, + core::ics04_channel::{handler::send_packet::send_packet, packet::Packet}, + events::ModuleEvent, + handler::{HandlerOutput, HandlerOutputBuilder}, + prelude::*, +}; /// This function handles the transfer sending logic. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. 
pub fn send_transfer( - ctx: &mut Ctx, - output: &mut HandlerOutputBuilder<()>, - msg: MsgTransfer, + ctx: &mut Ctx, + output: &mut HandlerOutputBuilder<()>, + msg: MsgTransfer, ) -> Result<(), Error> where - Ctx: Ics20Context, - C: TryInto, + Ctx: Ics20Context, + C: TryInto, { - if !ctx.is_send_enabled() { - return Err(Error::send_disabled()); - } + if !ctx.is_send_enabled() { + return Err(Error::send_disabled()) + } - let source_channel_end = ctx - .channel_end(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; + let source_channel_end = ctx + .channel_end(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; - let destination_port = source_channel_end.counterparty().port_id().clone(); - let destination_channel = *source_channel_end - .counterparty() - .channel_id() - .ok_or_else(|| { - Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) - })?; + let destination_port = source_channel_end.counterparty().port_id().clone(); + let destination_channel = *source_channel_end.counterparty().channel_id().ok_or_else(|| { + Error::destination_channel_not_found(msg.source_port.clone(), msg.source_channel) + })?; - // get the next sequence - let sequence = ctx - .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) - .map_err(Error::ics04_channel)?; + // get the next sequence + let sequence = ctx + .get_next_sequence_send(&(msg.source_port.clone(), msg.source_channel)) + .map_err(Error::ics04_channel)?; - let token = msg.token.try_into().map_err(|_| Error::invalid_token())?; - let denom = token.denom.clone(); - let coin = Coin { - denom: denom.clone(), - amount: token.amount, - }; + let token = msg.token.try_into().map_err(|_| Error::invalid_token())?; + let denom = token.denom.clone(); + let coin = Coin { denom: denom.clone(), amount: token.amount }; - let sender = msg - .sender - .clone() - .try_into() - .map_err(|_| Error::parse_account_failure())?; + let sender = 
msg.sender.clone().try_into().map_err(|_| Error::parse_account_failure())?; - if is_sender_chain_source(msg.source_port.clone(), msg.source_channel, &denom) { - let escrow_address = - ctx.get_channel_escrow_address(&msg.source_port, msg.source_channel)?; - ctx.send_coins(&sender, &escrow_address, &coin)?; - } else { - ctx.burn_coins(&sender, &coin)?; - } + if is_sender_chain_source(msg.source_port.clone(), msg.source_channel, &denom) { + let escrow_address = + ctx.get_channel_escrow_address(&msg.source_port, msg.source_channel)?; + ctx.send_coins(&sender, &escrow_address, &coin)?; + } else { + ctx.burn_coins(&sender, &coin)?; + } - let data = { - let data = PacketData { - token: coin, - sender: msg.sender.clone(), - receiver: msg.receiver.clone(), - }; - serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") - }; + let data = { + let data = + PacketData { token: coin, sender: msg.sender.clone(), receiver: msg.receiver.clone() }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; - let packet = Packet { - sequence, - source_port: msg.source_port, - source_channel: msg.source_channel, - destination_port, - destination_channel, - data, - timeout_height: msg.timeout_height, - timeout_timestamp: msg.timeout_timestamp, - }; + let packet = Packet { + sequence, + source_port: msg.source_port, + source_channel: msg.source_channel, + destination_port, + destination_channel, + data, + timeout_height: msg.timeout_height, + timeout_timestamp: msg.timeout_timestamp, + }; - let HandlerOutput { - result, - log, - events, - } = send_packet(ctx, packet).map_err(Error::ics04_channel)?; + let HandlerOutput { result, log, events } = + send_packet(ctx, packet).map_err(Error::ics04_channel)?; - ctx.store_packet_result(result) - .map_err(Error::ics04_channel)?; + ctx.store_packet_result(result).map_err(Error::ics04_channel)?; - output.merge_output( - HandlerOutput::builder() - .with_log(log) - .with_events(events) - 
.with_result(()), - ); + output.merge_output(HandlerOutput::builder().with_log(log).with_events(events).with_result(())); - output.log(format!( - "IBC fungible token transfer: {} --({})--> {}", - msg.sender, token, msg.receiver - )); + output.log(format!( + "IBC fungible token transfer: {} --({})--> {}", + msg.sender, token, msg.receiver + )); - let transfer_event = TransferEvent { - sender: msg.sender, - receiver: msg.receiver, - }; - output.emit(ModuleEvent::from(transfer_event).into()); + let transfer_event = TransferEvent { sender: msg.sender, receiver: msg.receiver }; + output.emit(ModuleEvent::from(transfer_event).into()); - Ok(()) + Ok(()) } diff --git a/modules/src/bigint.rs b/modules/src/bigint.rs index f681f75308..4b33e02332 100644 --- a/modules/src/bigint.rs +++ b/modules/src/bigint.rs @@ -1,8 +1 @@ -#![allow(clippy::assign_op_pattern)] -#![allow(clippy::ptr_offset_with_cast)] - -use uint::construct_uint; - -construct_uint! { - pub struct U256(4); -} +pub use primitive_types::U256; diff --git a/modules/src/clients/ics07_tendermint/client_def.rs b/modules/src/clients/ics07_tendermint/client_def.rs deleted file mode 100644 index 9f81f1c2d7..0000000000 --- a/modules/src/clients/ics07_tendermint/client_def.rs +++ /dev/null @@ -1,485 +0,0 @@ -use core::convert::TryInto; - -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; -use prost::Message; -use tendermint_light_client_verifier::types::{TrustedBlockState, UntrustedBlockState}; -use tendermint_light_client_verifier::{ProdVerifier, Verdict, Verifier}; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::client_state::ClientState; -use crate::clients::ics07_tendermint::consensus_state::ConsensusState; -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ClientDef; -use 
crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, -}; -use crate::core::ics23_commitment::merkle::{apply_prefix, MerkleProof}; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, PortId}; -use crate::core::ics24_host::path::{ - AcksPath, ChannelEndsPath, ClientConsensusStatePath, ClientStatePath, CommitmentsPath, - ConnectionsPath, ReceiptsPath, SeqRecvsPath, -}; -use crate::core::ics24_host::Path; -use crate::downcast; -use crate::prelude::*; -use crate::Height; - -#[derive(Clone, Debug, Default, PartialEq, Eq)] -pub struct TendermintClient { - verifier: ProdVerifier, -} - -impl ClientDef for TendermintClient { - type Header = Header; - type ClientState = ClientState; - type ConsensusState = ConsensusState; - - fn check_header_and_update_state( - &self, - ctx: &dyn ClientReader, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { - if header.height().revision_number != client_state.chain_id.version() { - return Err(Ics02Error::tendermint_handler_error( - Error::mismatched_revisions( - client_state.chain_id.version(), - header.height().revision_number, - ), - )); - } - - // Check if a consensus state is already installed; if so it should - // match the untrusted header. 
- let header_consensus_state = ConsensusState::from(header.clone()); - let existing_consensus_state = - match ctx.maybe_consensus_state(&client_id, header.height())? { - Some(cs) => { - let cs = downcast_consensus_state(cs)?; - // If this consensus state matches, skip verification - // (optimization) - if cs == header_consensus_state { - // Header is already installed and matches the incoming - // header (already verified) - return Ok((client_state, cs)); - } - Some(cs) - } - None => None, - }; - - let trusted_consensus_state = - downcast_consensus_state(ctx.consensus_state(&client_id, header.trusted_height)?)?; - - let trusted_state = TrustedBlockState { - header_time: trusted_consensus_state.timestamp, - height: header - .trusted_height - .revision_height - .try_into() - .map_err(|_| { - Ics02Error::tendermint_handler_error(Error::invalid_header_height( - header.trusted_height, - )) - })?, - next_validators: &header.trusted_validator_set, - next_validators_hash: trusted_consensus_state.next_validators_hash, - }; - - let untrusted_state = UntrustedBlockState { - signed_header: &header.signed_header, - validators: &header.validator_set, - // NB: This will skip the - // VerificationPredicates::next_validators_match check for the - // untrusted state. 
- next_validators: None, - }; - - let options = client_state.as_light_client_options()?; - - let verdict = self.verifier.verify( - untrusted_state, - trusted_state, - &options, - ctx.host_timestamp().into_tm_time().unwrap(), - ); - - match verdict { - Verdict::Success => {} - Verdict::NotEnoughTrust(voting_power_tally) => { - return Err(Error::not_enough_trusted_vals_signed(format!( - "voting power tally: {}", - voting_power_tally - )) - .into()) - } - Verdict::Invalid(detail) => { - return Err(Ics02Error::tendermint_handler_error( - Error::verification_error(detail), - )) - } - } - - // If the header has verified, but its corresponding consensus state - // differs from the existing consensus state for that height, freeze the - // client and return the installed consensus state. - if let Some(cs) = existing_consensus_state { - if cs != header_consensus_state { - return Ok((client_state.with_frozen_height(header.height())?, cs)); - } - } - - // Monotonicity checks for timestamps for in-the-middle updates - // (cs-new, cs-next, cs-latest) - if header.height() < client_state.latest_height() { - let maybe_next_cs = ctx - .next_consensus_state(&client_id, header.height())? - .map(downcast_consensus_state) - .transpose()?; - - if let Some(next_cs) = maybe_next_cs { - // New (untrusted) header timestamp cannot occur after next - // consensus state's height - if header.signed_header.header().time > next_cs.timestamp { - return Err(Ics02Error::tendermint_handler_error( - Error::header_timestamp_too_high( - header.signed_header.header().time.to_string(), - next_cs.timestamp.to_string(), - ), - )); - } - } - } - // (cs-trusted, cs-prev, cs-new) - if header.trusted_height < header.height() { - let maybe_prev_cs = ctx - .prev_consensus_state(&client_id, header.height())? 
- .map(downcast_consensus_state) - .transpose()?; - - if let Some(prev_cs) = maybe_prev_cs { - // New (untrusted) header timestamp cannot occur before the - // previous consensus state's height - if header.signed_header.header().time < prev_cs.timestamp { - return Err(Ics02Error::tendermint_handler_error( - Error::header_timestamp_too_low( - header.signed_header.header().time.to_string(), - prev_cs.timestamp.to_string(), - ), - )); - } - } - } - - Ok(( - client_state.with_header(header.clone()), - ConsensusState::from(header), - )) - } - - fn verify_client_consensus_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ClientConsensusStatePath { - client_id: client_id.clone(), - epoch: consensus_height.revision_number, - height: consensus_height.revision_height, - }; - let value = expected_consensus_state - .encode_vec() - .map_err(Ics02Error::invalid_any_consensus_state)?; - verify_membership(client_state, prefix, proof, root, path, value) - } - - fn verify_connection_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ConnectionsPath(connection_id.clone()); - let value = expected_connection_end - .encode_vec() - .map_err(Ics02Error::invalid_connection_end)?; - verify_membership(client_state, prefix, proof, root, path, value) - } - - fn verify_channel_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: 
&PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ChannelEndsPath(port_id.clone(), *channel_id); - let value = expected_channel_end - .encode_vec() - .map_err(Ics02Error::invalid_channel_end)?; - verify_membership(client_state, prefix, proof, root, path, value) - } - - fn verify_client_full_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - expected_client_state: &AnyClientState, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - - let path = ClientStatePath(client_id.clone()); - let value = expected_client_state - .encode_vec() - .map_err(Ics02Error::invalid_any_client_state)?; - verify_membership(client_state, prefix, proof, root, path, value) - } - - fn verify_packet_data( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let commitment_path = CommitmentsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - - verify_membership( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - commitment_path, - commitment.into_vec(), - ) - } - - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Ics02Error> { - 
client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let ack_path = AcksPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_membership( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - ack_path, - ack_commitment.into_vec(), - ) - } - - fn verify_next_sequence_recv( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let mut seq_bytes = Vec::new(); - u64::from(sequence) - .encode(&mut seq_bytes) - .expect("buffer size too small"); - - let seq_path = SeqRecvsPath(port_id.clone(), *channel_id); - verify_membership( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - seq_path, - seq_bytes, - ) - } - - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Ics02Error> { - client_state.verify_height(height)?; - verify_delay_passed(ctx, height, connection_end)?; - - let receipt_path = ReceiptsPath { - port_id: port_id.clone(), - channel_id: *channel_id, - sequence, - }; - verify_non_membership( - client_state, - connection_end.counterparty().prefix(), - proof, - root, - receipt_path, - ) - } - - fn verify_upgrade_and_update_state( - &self, - _client_state: &Self::ClientState, - _consensus_state: &Self::ConsensusState, - _proof_upgrade_client: RawMerkleProof, - _proof_upgrade_consensus_state: RawMerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Ics02Error> { - 
todo!() - } -} - -fn verify_membership( - client_state: &ClientState, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: impl Into, - value: Vec, -) -> Result<(), Ics02Error> { - let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) - .map_err(Ics02Error::invalid_commitment_proof)? - .into(); - - merkle_proof - .verify_membership( - &client_state.proof_specs, - root.clone().into(), - merkle_path, - value, - 0, - ) - .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) -} - -fn verify_non_membership( - client_state: &ClientState, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - path: impl Into, -) -> Result<(), Ics02Error> { - let merkle_path = apply_prefix(prefix, vec![path.into().to_string()]); - let merkle_proof: MerkleProof = RawMerkleProof::try_from(proof.clone()) - .map_err(Ics02Error::invalid_commitment_proof)? 
- .into(); - - merkle_proof - .verify_non_membership(&client_state.proof_specs, root.clone().into(), merkle_path) - .map_err(|e| Ics02Error::tendermint(Error::ics23_error(e))) -} - -fn verify_delay_passed( - ctx: &dyn ChannelReader, - height: Height, - connection_end: &ConnectionEnd, -) -> Result<(), Ics02Error> { - let current_timestamp = ctx.host_timestamp(); - let current_height = ctx.host_height(); - - let client_id = connection_end.client_id(); - let processed_time = ctx - .client_update_time(client_id, height) - .map_err(|_| Error::processed_time_not_found(client_id.clone(), height))?; - let processed_height = ctx - .client_update_height(client_id, height) - .map_err(|_| Error::processed_height_not_found(client_id.clone(), height))?; - - let delay_period_time = connection_end.delay_period(); - let delay_period_height = ctx.block_delay(delay_period_time); - - ClientState::verify_delay_passed( - current_timestamp, - current_height, - processed_time, - processed_height, - delay_period_time, - delay_period_height, - ) - .map_err(|e| e.into()) -} - -fn downcast_consensus_state(cs: AnyConsensusState) -> Result { - downcast!( - cs => AnyConsensusState::Tendermint - ) - .ok_or_else(|| Ics02Error::client_args_type_mismatch(ClientType::Tendermint)) -} diff --git a/modules/src/clients/ics07_tendermint/client_state.rs b/modules/src/clients/ics07_tendermint/client_state.rs deleted file mode 100644 index 5205b13687..0000000000 --- a/modules/src/clients/ics07_tendermint/client_state.rs +++ /dev/null @@ -1,674 +0,0 @@ -use crate::prelude::*; - -use core::convert::{TryFrom, TryInto}; -use core::time::Duration; - -use serde::{Deserialize, Serialize}; -use tendermint_light_client_verifier::options::Options; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::ClientState as RawClientState; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use 
crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::trust_threshold::TrustThreshold; -use crate::core::ics23_commitment::specs::ProofSpecs; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::{Timestamp, ZERO_DURATION}; -use crate::Height; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct ClientState { - pub chain_id: ChainId, - pub trust_level: TrustThreshold, - pub trusting_period: Duration, - pub unbonding_period: Duration, - pub max_clock_drift: Duration, - pub latest_height: Height, - pub proof_specs: ProofSpecs, - pub upgrade_path: Vec, - pub allow_update: AllowUpdate, - pub frozen_height: Option, -} - -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct AllowUpdate { - pub after_expiry: bool, - pub after_misbehaviour: bool, -} - -impl Protobuf for ClientState {} - -impl ClientState { - #[allow(clippy::too_many_arguments)] - pub fn new( - chain_id: ChainId, - trust_level: TrustThreshold, - trusting_period: Duration, - unbonding_period: Duration, - max_clock_drift: Duration, - latest_height: Height, - proof_specs: ProofSpecs, - upgrade_path: Vec, - allow_update: AllowUpdate, - ) -> Result { - // Basic validation of trusting period and unbonding period: each should be non-zero. 
- if trusting_period <= Duration::new(0, 0) { - return Err(Error::invalid_trusting_period(format!( - "ClientState trusting period ({:?}) must be greater than zero", - trusting_period - ))); - } - - if unbonding_period <= Duration::new(0, 0) { - return Err(Error::invalid_unbonding_period(format!( - "ClientState unbonding period ({:?}) must be greater than zero", - unbonding_period - ))); - } - - if trusting_period >= unbonding_period { - return Err(Error::invalid_trusting_period(format!( - "ClientState trusting period ({:?}) must be smaller than unbonding period ({:?})", - trusting_period, unbonding_period, - ))); - } - - // Basic validation for the latest_height parameter. - if latest_height <= Height::zero() { - return Err(Error::validation( - "ClientState latest height must be greater than zero".to_string(), - )); - } - - // `TrustThreshold` is guaranteed to be in the range `[0, 1)`, but a `TrustThreshold::ZERO` - // value is invalid in this context - if trust_level == TrustThreshold::ZERO { - return Err(Error::validation( - "ClientState trust-level cannot be zero".to_string(), - )); - } - - // Disallow empty proof-specs - if proof_specs.is_empty() { - return Err(Error::validation( - "ClientState proof-specs cannot be empty".to_string(), - )); - } - - Ok(Self { - chain_id, - trust_level, - trusting_period, - unbonding_period, - max_clock_drift, - latest_height, - proof_specs, - upgrade_path, - allow_update, - frozen_height: None, - }) - } - - pub fn latest_height(&self) -> Height { - self.latest_height - } - - pub fn with_header(self, h: Header) -> Self { - // TODO: Clarify which fields should update. 
- ClientState { - latest_height: self - .latest_height - .with_revision_height(u64::from(h.signed_header.header.height)), - ..self - } - } - - pub fn with_frozen_height(self, h: Height) -> Result { - if h == Height::zero() { - return Err(Error::validation( - "ClientState frozen height must be greater than zero".to_string(), - )); - } - Ok(Self { - frozen_height: Some(h), - ..self - }) - } - - /// Get the refresh time to ensure the state does not expire - pub fn refresh_time(&self) -> Option { - Some(2 * self.trusting_period / 3) - } - - /// Check if the state is expired when `elapsed` time has passed since the latest consensus - /// state timestamp - pub fn expired(&self, elapsed: Duration) -> bool { - elapsed > self.trusting_period - } - - /// Helper method to produce a [`Options`] struct for use in - /// Tendermint-specific light client verification. - pub fn as_light_client_options(&self) -> Result { - Ok(Options { - trust_threshold: self - .trust_level - .try_into() - .map_err(|e: Ics02Error| Error::invalid_trust_threshold(e.to_string()))?, - trusting_period: self.trusting_period, - clock_drift: self.max_clock_drift, - }) - } - - /// Verify the time and height delays - pub fn verify_delay_passed( - current_time: Timestamp, - current_height: Height, - processed_time: Timestamp, - processed_height: Height, - delay_period_time: Duration, - delay_period_blocks: u64, - ) -> Result<(), Error> { - let earliest_time = - (processed_time + delay_period_time).map_err(Error::timestamp_overflow)?; - if !(current_time == earliest_time || current_time.after(&earliest_time)) { - return Err(Error::not_enough_time_elapsed(current_time, earliest_time)); - } - - let earliest_height = processed_height.add(delay_period_blocks); - if current_height < earliest_height { - return Err(Error::not_enough_blocks_elapsed( - current_height, - earliest_height, - )); - } - - Ok(()) - } - - /// Verify that the client is at a sufficient height and unfrozen at the given height - pub fn 
verify_height(&self, height: Height) -> Result<(), Error> { - if self.latest_height < height { - return Err(Error::insufficient_height(self.latest_height(), height)); - } - - match self.frozen_height { - Some(frozen_height) if frozen_height <= height => { - Err(Error::client_frozen(frozen_height, height)) - } - _ => Ok(()), - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct UpgradeOptions { - pub unbonding_period: Duration, -} - -impl crate::core::ics02_client::client_state::ClientState for ClientState { - type UpgradeOptions = UpgradeOptions; - - fn chain_id(&self) -> ChainId { - self.chain_id.clone() - } - - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn latest_height(&self) -> Height { - self.latest_height - } - - fn frozen_height(&self) -> Option { - self.frozen_height - } - - fn upgrade( - mut self, - upgrade_height: Height, - upgrade_options: UpgradeOptions, - chain_id: ChainId, - ) -> Self { - // Reset custom fields to zero values - self.trusting_period = ZERO_DURATION; - self.trust_level = TrustThreshold::ZERO; - self.allow_update.after_expiry = false; - self.allow_update.after_misbehaviour = false; - self.frozen_height = None; - self.max_clock_drift = ZERO_DURATION; - - // Upgrade the client state - self.latest_height = upgrade_height; - self.unbonding_period = upgrade_options.unbonding_period; - self.chain_id = chain_id; - - self - } - - fn wrap_any(self) -> AnyClientState { - AnyClientState::Tendermint(self) - } -} - -impl TryFrom for ClientState { - type Error = Error; - - fn try_from(raw: RawClientState) -> Result { - let trust_level = raw - .trust_level - .clone() - .ok_or_else(Error::missing_trusting_period)?; - - let frozen_height = raw.frozen_height.and_then(|raw_height| { - let height = raw_height.into(); - if height == Height::zero() { - None - } else { - Some(height) - } - }); - - Ok(Self { - chain_id: ChainId::from_string(raw.chain_id.as_str()), - trust_level: trust_level - 
.try_into() - .map_err(|e| Error::invalid_trust_threshold(format!("{}", e)))?, - trusting_period: raw - .trusting_period - .ok_or_else(Error::missing_trusting_period)? - .try_into() - .map_err(|_| Error::negative_trusting_period())?, - unbonding_period: raw - .unbonding_period - .ok_or_else(Error::missing_unbonding_period)? - .try_into() - .map_err(|_| Error::negative_unbonding_period())?, - max_clock_drift: raw - .max_clock_drift - .ok_or_else(Error::missing_max_clock_drift)? - .try_into() - .map_err(|_| Error::negative_max_clock_drift())?, - latest_height: raw - .latest_height - .ok_or_else(Error::missing_latest_height)? - .into(), - frozen_height, - upgrade_path: raw.upgrade_path, - allow_update: AllowUpdate { - after_expiry: raw.allow_update_after_expiry, - after_misbehaviour: raw.allow_update_after_misbehaviour, - }, - proof_specs: raw.proof_specs.into(), - }) - } -} - -impl From for RawClientState { - fn from(value: ClientState) -> Self { - RawClientState { - chain_id: value.chain_id.to_string(), - trust_level: Some(value.trust_level.into()), - trusting_period: Some(value.trusting_period.into()), - unbonding_period: Some(value.unbonding_period.into()), - max_clock_drift: Some(value.max_clock_drift.into()), - frozen_height: Some(value.frozen_height.unwrap_or_else(Height::zero).into()), - latest_height: Some(value.latest_height.into()), - proof_specs: value.proof_specs.into(), - allow_update_after_expiry: value.allow_update.after_expiry, - allow_update_after_misbehaviour: value.allow_update.after_misbehaviour, - upgrade_path: value.upgrade_path, - } - } -} - -#[cfg(test)] -mod tests { - use crate::prelude::*; - use crate::Height; - use core::time::Duration; - use test_log::test; - - use ibc_proto::ics23::ProofSpec as Ics23ProofSpec; - use tendermint_rpc::endpoint::abci_query::AbciQuery; - - use crate::clients::ics07_tendermint::client_state::{AllowUpdate, ClientState}; - use crate::core::ics02_client::trust_threshold::TrustThreshold; - use 
crate::core::ics23_commitment::specs::ProofSpecs; - use crate::core::ics24_host::identifier::ChainId; - use crate::test::test_serialization_roundtrip; - use crate::timestamp::{Timestamp, ZERO_DURATION}; - - #[derive(Clone, Debug, PartialEq)] - struct ClientStateParams { - id: ChainId, - trust_level: TrustThreshold, - trusting_period: Duration, - unbonding_period: Duration, - max_clock_drift: Duration, - latest_height: Height, - proof_specs: ProofSpecs, - upgrade_path: Vec, - allow_update: AllowUpdate, - } - - #[test] - fn serialization_roundtrip_no_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/client_state.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn serialization_roundtrip_with_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/client_state_proof.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn client_state_new() { - // Define a "default" set of parameters to reuse throughout these tests. 
- let default_params: ClientStateParams = ClientStateParams { - id: ChainId::default(), - trust_level: TrustThreshold::ONE_THIRD, - trusting_period: Duration::new(64000, 0), - unbonding_period: Duration::new(128000, 0), - max_clock_drift: Duration::new(3, 0), - latest_height: Height::new(0, 10), - proof_specs: ProofSpecs::default(), - upgrade_path: vec!["".to_string()], - allow_update: AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, - }; - - struct Test { - name: String, - params: ClientStateParams, - want_pass: bool, - } - - let tests: Vec = vec![ - Test { - name: "Valid parameters".to_string(), - params: default_params.clone(), - want_pass: true, - }, - Test { - name: "Invalid unbonding period".to_string(), - params: ClientStateParams { - unbonding_period: ZERO_DURATION, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) trusting period".to_string(), - params: ClientStateParams { - trusting_period: ZERO_DURATION, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too large) trusting period w.r.t. 
unbonding period".to_string(), - params: ClientStateParams { - trusting_period: Duration::new(11, 0), - unbonding_period: Duration::new(10, 0), - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) trusting trust threshold".to_string(), - params: ClientStateParams { - trust_level: TrustThreshold::ZERO, - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (too small) latest height".to_string(), - params: ClientStateParams { - latest_height: Height::zero(), - ..default_params.clone() - }, - want_pass: false, - }, - Test { - name: "Invalid (empty) proof specs".to_string(), - params: ClientStateParams { - proof_specs: ProofSpecs::from(Vec::::new()), - ..default_params - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let p = test.params.clone(); - - let cs_result = ClientState::new( - p.id, - p.trust_level, - p.trusting_period, - p.unbonding_period, - p.max_clock_drift, - p.latest_height, - p.proof_specs, - p.upgrade_path, - p.allow_update, - ); - - assert_eq!( - test.want_pass, - cs_result.is_ok(), - "ClientState::new() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - cs_result.err(), - ); - } - } - - #[test] - fn client_state_verify_delay_passed() { - #[derive(Debug, Clone)] - struct Params { - current_time: Timestamp, - current_height: Height, - processed_time: Timestamp, - processed_height: Height, - delay_period_time: Duration, - delay_period_blocks: u64, - } - struct Test { - name: String, - params: Params, - want_pass: bool, - } - let now = Timestamp::now(); - - let tests: Vec = vec![ - Test { - name: "Successful delay verification".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(2000)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 3), - delay_period_time: Duration::from_nanos(500), - 
delay_period_blocks: 2, - }, - want_pass: true, - }, - Test { - name: "Delay period(time) has not elapsed".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(1200)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 3), - delay_period_time: Duration::from_nanos(500), - delay_period_blocks: 2, - }, - want_pass: false, - }, - Test { - name: "Delay period(blocks) has not elapsed".to_string(), - params: Params { - current_time: (now + Duration::from_nanos(2000)).unwrap(), - current_height: Height::new(0, 5), - processed_time: (now + Duration::from_nanos(1000)).unwrap(), - processed_height: Height::new(0, 4), - delay_period_time: Duration::from_nanos(500), - delay_period_blocks: 2, - }, - want_pass: false, - }, - ]; - - for test in tests { - let res = ClientState::verify_delay_passed( - test.params.current_time, - test.params.current_height, - test.params.processed_time, - test.params.processed_height, - test.params.delay_period_time, - test.params.delay_period_blocks, - ); - - assert_eq!( - test.want_pass, - res.is_ok(), - "ClientState::verify_delay_passed() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - res.err(), - ); - } - } - - #[test] - fn client_state_verify_height() { - // Define a "default" set of parameters to reuse throughout these tests. 
- let default_params: ClientStateParams = ClientStateParams { - id: ChainId::default(), - trust_level: TrustThreshold::ONE_THIRD, - trusting_period: Duration::new(64000, 0), - unbonding_period: Duration::new(128000, 0), - max_clock_drift: Duration::new(3, 0), - latest_height: Height::new(1, 10), - proof_specs: ProofSpecs::default(), - upgrade_path: vec!["".to_string()], - allow_update: AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, - }; - - struct Test { - name: String, - height: Height, - setup: Option ClientState>>, - want_pass: bool, - } - - let tests = vec![ - Test { - name: "Successful height verification".to_string(), - height: Height::new(1, 8), - setup: None, - want_pass: true, - }, - Test { - name: "Invalid (too large) client height".to_string(), - height: Height::new(1, 12), - setup: None, - want_pass: false, - }, - Test { - name: "Invalid, client is frozen below current height".to_string(), - height: Height::new(1, 6), - setup: Some(Box::new(|client_state| { - client_state.with_frozen_height(Height::new(1, 5)).unwrap() - })), - want_pass: false, - }, - ]; - - for test in tests { - let p = default_params.clone(); - let client_state = ClientState::new( - p.id, - p.trust_level, - p.trusting_period, - p.unbonding_period, - p.max_clock_drift, - p.latest_height, - p.proof_specs, - p.upgrade_path, - p.allow_update, - ) - .unwrap(); - let client_state = match test.setup { - Some(setup) => (setup)(client_state), - _ => client_state, - }; - let res = client_state.verify_height(test.height); - - assert_eq!( - test.want_pass, - res.is_ok(), - "ClientState::verify_delay_height() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.height, - res.err(), - ); - } - } -} - -#[cfg(any(test, feature = "mocks"))] -pub mod test_util { - use crate::prelude::*; - use core::time::Duration; - - use tendermint::block::Header; - - use crate::clients::ics07_tendermint::client_state::{AllowUpdate, ClientState}; - use 
crate::core::ics02_client::client_state::AnyClientState; - use crate::core::ics02_client::height::Height; - use crate::core::ics24_host::identifier::ChainId; - - pub fn get_dummy_tendermint_client_state(tm_header: Header) -> AnyClientState { - AnyClientState::Tendermint( - ClientState::new( - ChainId::from(tm_header.chain_id.clone()), - Default::default(), - Duration::from_secs(64000), - Duration::from_secs(128000), - Duration::from_millis(3000), - Height::new( - ChainId::chain_version(tm_header.chain_id.as_str()), - u64::from(tm_header.height), - ), - Default::default(), - vec!["".to_string()], - AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, - ) - .unwrap(), - ) - } -} diff --git a/modules/src/clients/ics07_tendermint/consensus_state.rs b/modules/src/clients/ics07_tendermint/consensus_state.rs deleted file mode 100644 index f710a6bcce..0000000000 --- a/modules/src/clients/ics07_tendermint/consensus_state.rs +++ /dev/null @@ -1,135 +0,0 @@ -use crate::prelude::*; - -use core::convert::Infallible; - -use serde::Serialize; -use tendermint::{hash::Algorithm, time::Time, Hash}; -use tendermint_proto::google::protobuf as tpb; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::ConsensusState as RawConsensusState; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics23_commitment::commitment::CommitmentRoot; - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct ConsensusState { - pub timestamp: Time, - pub root: CommitmentRoot, - pub next_validators_hash: Hash, -} - -impl ConsensusState { - pub fn new(root: CommitmentRoot, timestamp: Time, next_validators_hash: Hash) -> Self { - Self { - timestamp, - root, - next_validators_hash, - } - } -} - -impl 
crate::core::ics02_client::client_consensus::ConsensusState for ConsensusState { - type Error = Infallible; - - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn root(&self) -> &CommitmentRoot { - &self.root - } - - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Tendermint(self) - } -} - -impl Protobuf for ConsensusState {} - -impl TryFrom for ConsensusState { - type Error = Error; - - fn try_from(raw: RawConsensusState) -> Result { - let ibc_proto::google::protobuf::Timestamp { seconds, nanos } = raw - .timestamp - .ok_or_else(|| Error::invalid_raw_consensus_state("missing timestamp".into()))?; - // FIXME: shunts like this are necessary due to - // https://github.com/informalsystems/tendermint-rs/issues/1053 - let proto_timestamp = tpb::Timestamp { seconds, nanos }; - let timestamp = proto_timestamp - .try_into() - .map_err(|e| Error::invalid_raw_consensus_state(format!("invalid timestamp: {}", e)))?; - - Ok(Self { - root: raw - .root - .ok_or_else(|| { - Error::invalid_raw_consensus_state("missing commitment root".into()) - })? 
- .hash - .into(), - timestamp, - next_validators_hash: Hash::from_bytes(Algorithm::Sha256, &raw.next_validators_hash) - .map_err(|e| Error::invalid_raw_consensus_state(e.to_string()))?, - }) - } -} - -impl From for RawConsensusState { - fn from(value: ConsensusState) -> Self { - // FIXME: shunts like this are necessary due to - // https://github.com/informalsystems/tendermint-rs/issues/1053 - let tpb::Timestamp { seconds, nanos } = value.timestamp.into(); - let timestamp = ibc_proto::google::protobuf::Timestamp { seconds, nanos }; - - RawConsensusState { - timestamp: Some(timestamp), - root: Some(ibc_proto::ibc::core::commitment::v1::MerkleRoot { - hash: value.root.into_vec(), - }), - next_validators_hash: value.next_validators_hash.as_bytes().to_vec(), - } - } -} - -impl From for ConsensusState { - fn from(header: tendermint::block::Header) -> Self { - Self { - root: CommitmentRoot::from_bytes(header.app_hash.as_ref()), - timestamp: header.time, - next_validators_hash: header.next_validators_hash, - } - } -} - -impl From
for ConsensusState { - fn from(header: Header) -> Self { - Self::from(header.signed_header.header) - } -} - -#[cfg(test)] -mod tests { - use tendermint_rpc::endpoint::abci_query::AbciQuery; - use test_log::test; - - use crate::test::test_serialization_roundtrip; - - #[test] - fn serialization_roundtrip_no_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state.json"); - test_serialization_roundtrip::(json_data); - } - - #[test] - fn serialization_roundtrip_with_proof() { - let json_data = - include_str!("../../../tests/support/query/serialization/consensus_state_proof.json"); - test_serialization_roundtrip::(json_data); - } -} diff --git a/modules/src/clients/ics07_tendermint/error.rs b/modules/src/clients/ics07_tendermint/error.rs deleted file mode 100644 index 0ef3b9f29c..0000000000 --- a/modules/src/clients/ics07_tendermint/error.rs +++ /dev/null @@ -1,294 +0,0 @@ -use crate::prelude::*; - -use flex_error::{define_error, TraceError}; - -use crate::core::ics23_commitment::error::Error as Ics23Error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::timestamp::{Timestamp, TimestampOverflowError}; - -use crate::Height; -use tendermint::account::Id; -use tendermint::hash::Hash; -use tendermint::Error as TendermintError; -use tendermint_light_client_verifier::errors::VerificationErrorDetail as LightClientErrorDetail; - -define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidTrustingPeriod - { reason: String } - |e| { format_args!("invalid trusting period: {}", e.reason) }, - - InvalidUnbondingPeriod - { reason: String } - |e| { format_args!("invalid unbonding period: {}", e.reason) }, - - InvalidAddress - |_| { "invalid address" }, - - InvalidHeader - { reason: String } - [ TendermintError ] - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - InvalidTrustThreshold - { reason: String } - |e| { format_args!("invalid client state trust threshold: {}", e.reason) }, - - MissingSignedHeader - |_| { "missing signed header" }, - - Validation - { reason: String } - |e| { format_args!("invalid header, failed basic validation: {}", e.reason) }, - - InvalidRawClientState - { reason: String } - |e| { format_args!("invalid raw client state: {}", e.reason) }, - - MissingValidatorSet - |_| { "missing validator set" }, - - MissingTrustedValidatorSet - |_| { "missing trusted validator set" }, - - MissingTrustedHeight - |_| { "missing trusted height" }, - - MissingTrustingPeriod - |_| { "missing trusting period" }, - - MissingUnbondingPeriod - |_| { "missing unbonding period" }, - - InvalidChainIdentifier - [ ValidationError ] - |_| { "invalid chain identifier" }, - - NegativeTrustingPeriod - |_| { "negative trusting period" }, - - NegativeUnbondingPeriod - |_| { "negative unbonding period" }, - - MissingMaxClockDrift - |_| { "missing max clock drift" }, - - NegativeMaxClockDrift - |_| { "negative max clock drift" }, - - MissingLatestHeight - |_| { "missing latest height" }, - - MissingFrozenHeight - |_| { "missing frozen height" }, - - InvalidChainId - { raw_value: String } - [ ValidationError ] - |e| { format_args!("invalid chain identifier: {}", e.raw_value) }, - - InvalidRawHeight - { raw_height: u64 } - |e| { format_args!("invalid raw height: {}", e.raw_height) }, - - InvalidRawConsensusState - { reason: String } - | e | { format_args!("invalid raw client 
consensus state: {}", e.reason) }, - - InvalidRawHeader - [ TendermintError ] - | _ | { "invalid raw header" }, - - InvalidRawMisbehaviour - { reason: String } - | e | { format_args!("invalid raw misbehaviour: {}", e.reason) }, - - Decode - [ TraceError ] - | _ | { "decode error" }, - - InsufficientVotingPower - { reason: String } - | e | { - format_args!("insufficient overlap: {}", e.reason) - }, - - LowUpdateTimestamp - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} must be greater than current client consensus state timestamp {1}", e.low, e.high) - }, - - HeaderTimestampOutsideTrustingTime - { - low: String, - high: String - } - | e | { - format_args!("header timestamp {0} is outside the trusting period w.r.t. consensus state timestamp {1}", e.low, e.high) - }, - - HeaderTimestampTooHigh - { - actual: String, - max: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at most {0}, but was {1}", e.max, e.actual) - }, - - HeaderTimestampTooLow - { - actual: String, - min: String, - } - | e | { - format_args!("given other previous updates, header timestamp should be at least {0}, but was {1}", e.min, e.actual) - }, - - TimestampOverflow - [ TimestampOverflowError ] - |_| { "timestamp overflowed" }, - - NotEnoughTimeElapsed - { - current_time: Timestamp, - earliest_time: Timestamp, - } - | e | { - format_args!("not enough time elapsed, current timestamp {0} is still less than earliest acceptable timestamp {1}", e.current_time, e.earliest_time) - }, - - NotEnoughBlocksElapsed - { - current_height: Height, - earliest_height: Height, - } - | e | { - format_args!("not enough blocks elapsed, current height {0} is still less than earliest acceptable height {1}", e.current_height, e.earliest_height) - }, - - InvalidHeaderHeight - { height: Height } - | e | { - format_args!("header height = {0} is invalid", e.height) - }, - - InvalidTrustedHeaderHeight - { - trusted_header_height: Height, - 
height_header: Height - } - | e | { - format_args!("header height is {0} and is lower than the trusted header height, which is {1} ", e.height_header, e.trusted_header_height) - }, - - LowUpdateHeight - { - low: Height, - high: Height - } - | e | { - format_args!("header height is {0} but it must be greater than the current client height which is {1}", e.low, e.high) - }, - - MismatchedRevisions - { - current_revision: u64, - update_revision: u64, - } - | e | { - format_args!("the header's current/trusted revision number ({0}) and the update's revision number ({1}) should be the same", e.current_revision, e.update_revision) - }, - - InvalidValidatorSet - { - hash1: Hash, - hash2: Hash, - } - | e | { - format_args!("invalid validator set: header_validators_hash={} and validators_hash={}", e.hash1, e.hash2) - }, - - NotEnoughTrustedValsSigned - { reason: String } - | e | { - format_args!("not enough trust because insufficient validators overlap: {}", e.reason) - }, - - VerificationError - { detail: LightClientErrorDetail } - | e | { - format_args!("verification failed: {}", e.detail) - }, - - ProcessedTimeNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed time for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - ProcessedHeightNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed height for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - Ics23Error - [ Ics23Error ] - | _ | { "ics23 commitment error" }, - - InsufficientHeight - { - latest_height: Height, - target_height: Height, - } - | e | { - format_args!("the height is insufficient: latest_height={0} target_height={1}", e.latest_height, e.target_height) - }, - - ClientFrozen - { - frozen_height: Height, - target_height: Height, - } - | e | { - format_args!("the client is frozen: frozen_height={0} target_height={1}", e.frozen_height, e.target_height) - }, - } -} - 
-define_error! { - #[derive(Debug, PartialEq, Eq)] - VerificationError { - InvalidSignature - | _ | { "couldn't verify validator signature" }, - - DuplicateValidator - { id: Id } - | e | { - format_args!("duplicate validator in commit signatures with address {}", e.id) - }, - - InsufficientOverlap - { q1: u64, q2: u64 } - | e | { - format_args!("insufficient signers overlap between {0} and {1}", e.q1, e.q2) - }, - } -} diff --git a/modules/src/clients/ics07_tendermint/header.rs b/modules/src/clients/ics07_tendermint/header.rs deleted file mode 100644 index 8fe32105a5..0000000000 --- a/modules/src/clients/ics07_tendermint/header.rs +++ /dev/null @@ -1,206 +0,0 @@ -use core::cmp::Ordering; - -use bytes::Buf; -use prost::Message; -use serde_derive::{Deserialize, Serialize}; -use tendermint::block::signed_header::SignedHeader; -use tendermint::validator::Set as ValidatorSet; -use tendermint_proto::Protobuf; - -use crate::alloc::string::ToString; - -use ibc_proto::ibc::lightclients::tendermint::v1::Header as RawHeader; - -use crate::clients::ics07_tendermint::error::Error; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::identifier::ChainId; -use crate::timestamp::Timestamp; -use crate::Height; - -/// Tendermint consensus header -#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)] -pub struct Header { - pub signed_header: SignedHeader, // contains the commitment root - pub validator_set: ValidatorSet, // the validator set that signed Header - pub trusted_height: Height, // the height of a trusted header seen by client less than or equal to Header - // TODO(thane): Rename this to trusted_next_validator_set? 
- pub trusted_validator_set: ValidatorSet, // the last trusted validator set at trusted height -} - -impl core::fmt::Debug for Header { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, " Header {{...}}") - } -} - -impl Header { - pub fn height(&self) -> Height { - Height::new( - ChainId::chain_version(self.signed_header.header.chain_id.as_str()), - u64::from(self.signed_header.header.height), - ) - } - - pub fn compatible_with(&self, other_header: &Header) -> bool { - headers_compatible(&self.signed_header, &other_header.signed_header) - } -} - -pub fn headers_compatible(header: &SignedHeader, other: &SignedHeader) -> bool { - let ibc_client_height = other.header.height; - let self_header_height = header.header.height; - - match self_header_height.cmp(&ibc_client_height) { - Ordering::Equal => { - // 1 - fork - header.commit.block_id == other.commit.block_id - } - Ordering::Greater => { - // 2 - BFT time violation - header.header.time > other.header.time - } - Ordering::Less => { - // 3 - BFT time violation - header.header.time < other.header.time - } - } -} - -impl crate::core::ics02_client::header::Header for Header { - fn client_type(&self) -> ClientType { - ClientType::Tendermint - } - - fn height(&self) -> Height { - self.height() - } - - fn timestamp(&self) -> Timestamp { - self.signed_header.header.time.into() - } - - fn wrap_any(self) -> AnyHeader { - AnyHeader::Tendermint(self) - } -} - -impl Protobuf for Header {} - -impl TryFrom for Header { - type Error = Error; - - fn try_from(raw: RawHeader) -> Result { - let header = Self { - signed_header: raw - .signed_header - .ok_or_else(Error::missing_signed_header)? - .try_into() - .map_err(|e| Error::invalid_header("signed header conversion".to_string(), e))?, - validator_set: raw - .validator_set - .ok_or_else(Error::missing_validator_set)? 
- .try_into() - .map_err(Error::invalid_raw_header)?, - trusted_height: raw - .trusted_height - .ok_or_else(Error::missing_trusted_height)? - .into(), - trusted_validator_set: raw - .trusted_validators - .ok_or_else(Error::missing_trusted_validator_set)? - .try_into() - .map_err(Error::invalid_raw_header)?, - }; - - if header.height().revision_number != header.trusted_height.revision_number { - return Err(Error::mismatched_revisions( - header.trusted_height.revision_number, - header.height().revision_number, - )); - } - - Ok(header) - } -} - -pub fn decode_header(buf: B) -> Result { - RawHeader::decode(buf).map_err(Error::decode)?.try_into() -} - -impl From
for RawHeader { - fn from(value: Header) -> Self { - RawHeader { - signed_header: Some(value.signed_header.into()), - validator_set: Some(value.validator_set.into()), - trusted_height: Some(value.trusted_height.into()), - trusted_validators: Some(value.trusted_validator_set.into()), - } - } -} - -#[cfg(test)] -pub mod test_util { - use alloc::vec; - - use subtle_encoding::hex; - use tendermint::block::signed_header::SignedHeader; - use tendermint::validator::Info as ValidatorInfo; - use tendermint::validator::Set as ValidatorSet; - use tendermint::PublicKey; - - use crate::clients::ics07_tendermint::header::Header; - use crate::Height; - - pub fn get_dummy_tendermint_header() -> tendermint::block::Header { - serde_json::from_str::(include_str!( - "../../../tests/support/signed_header.json" - )) - .unwrap() - .header - } - - // TODO: This should be replaced with a ::default() or ::produce(). - // The implementation of this function comprises duplicate code (code borrowed from - // `tendermint-rs` for assembling a Header). - // See https://github.com/informalsystems/tendermint-rs/issues/381. - // - // The normal flow is: - // - get the (trusted) signed header and the `trusted_validator_set` at a `trusted_height` - // - get the `signed_header` and the `validator_set` at latest height - // - build the ics07 Header - // For testing purposes this function does: - // - get the `signed_header` from a .json file - // - create the `validator_set` with a single validator that is also the proposer - // - assume a `trusted_height` of 1 and no change in the validator set since height 1, - // i.e. `trusted_validator_set` = `validator_set` - pub fn get_dummy_ics07_header() -> Header { - // Build a SignedHeader from a JSON file. - let shdr = serde_json::from_str::(include_str!( - "../../../tests/support/signed_header.json" - )) - .unwrap(); - - // Build a set of validators. - // Below are test values inspired form `test_validator_set()` in tendermint-rs. 
- let v1: ValidatorInfo = ValidatorInfo::new( - PublicKey::from_raw_ed25519( - &hex::decode_upper( - "F349539C7E5EF7C49549B09C4BFC2335318AB0FE51FBFAA2433B4F13E816F4A7", - ) - .unwrap(), - ) - .unwrap(), - 281_815_u64.try_into().unwrap(), - ); - - let vs = ValidatorSet::new(vec![v1.clone()], Some(v1)); - - Header { - signed_header: shdr, - validator_set: vs.clone(), - trusted_height: Height::new(0, 1), - trusted_validator_set: vs, - } - } -} diff --git a/modules/src/clients/ics07_tendermint/misbehaviour.rs b/modules/src/clients/ics07_tendermint/misbehaviour.rs deleted file mode 100644 index 016c3fde3e..0000000000 --- a/modules/src/clients/ics07_tendermint/misbehaviour.rs +++ /dev/null @@ -1,76 +0,0 @@ -use crate::prelude::*; - -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::lightclients::tendermint::v1::Misbehaviour as RawMisbehaviour; - -use crate::clients::ics07_tendermint::error::Error; -use crate::clients::ics07_tendermint::header::Header; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::Height; - -#[derive(Clone, Debug, PartialEq)] -pub struct Misbehaviour { - pub client_id: ClientId, - pub header1: Header, - pub header2: Header, -} - -impl crate::core::ics02_client::misbehaviour::Misbehaviour for Misbehaviour { - fn client_id(&self) -> &ClientId { - &self.client_id - } - - fn height(&self) -> Height { - self.header1.height() - } - - fn wrap_any(self) -> AnyMisbehaviour { - AnyMisbehaviour::Tendermint(self) - } -} - -impl Protobuf for Misbehaviour {} - -impl TryFrom for Misbehaviour { - type Error = Error; - - fn try_from(raw: RawMisbehaviour) -> Result { - Ok(Self { - client_id: Default::default(), - header1: raw - .header_1 - .ok_or_else(|| Error::invalid_raw_misbehaviour("missing header1".into()))? - .try_into()?, - header2: raw - .header_2 - .ok_or_else(|| Error::invalid_raw_misbehaviour("missing header2".into()))? 
- .try_into()?, - }) - } -} - -impl From for RawMisbehaviour { - fn from(value: Misbehaviour) -> Self { - RawMisbehaviour { - client_id: value.client_id.to_string(), - header_1: Some(value.header1.into()), - header_2: Some(value.header2.into()), - } - } -} - -impl core::fmt::Display for Misbehaviour { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "{:?} h1: {:?}-{:?} h2: {:?}-{:?}", - self.client_id, - self.header1.height(), - self.header1.trusted_height, - self.header2.height(), - self.header2.trusted_height, - ) - } -} diff --git a/modules/src/clients/ics07_tendermint/mod.rs b/modules/src/clients/ics07_tendermint/mod.rs deleted file mode 100644 index 3b32d8e62f..0000000000 --- a/modules/src/clients/ics07_tendermint/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! ICS 07: Tendermint Client implements a client verification algorithm for blockchains which use -//! the Tendermint consensus algorithm. - -pub mod client_def; -pub mod client_state; -pub mod consensus_state; -pub mod error; -pub mod header; -pub mod misbehaviour; diff --git a/modules/src/clients/mod.rs b/modules/src/clients/mod.rs deleted file mode 100644 index 65ea910b18..0000000000 --- a/modules/src/clients/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Implementations of client verification algorithms for specific types of chains. 
- -pub mod ics07_tendermint; diff --git a/modules/src/core/ics02_client/client_consensus.rs b/modules/src/core/ics02_client/client_consensus.rs index e1ee92eaac..81790a244e 100644 --- a/modules/src/core/ics02_client/client_consensus.rs +++ b/modules/src/core/ics02_client/client_consensus.rs @@ -1,177 +1,45 @@ -use crate::prelude::*; - -use core::convert::Infallible; -use core::marker::{Send, Sync}; - -use ibc_proto::google::protobuf::Any; -use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; -use serde::Serialize; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::consensus_state; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::WithBlockDataType; -use crate::timestamp::Timestamp; - -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_state::MockConsensusState; - -pub const TENDERMINT_CONSENSUS_STATE_TYPE_URL: &str = - "/ibc.lightclients.tendermint.v1.ConsensusState"; - -pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; - -pub trait ConsensusState: Clone + core::fmt::Debug + Send + Sync { - type Error; - - /// Type of client associated with this consensus state (eg. Tendermint) - fn client_type(&self) -> ClientType; - - /// Commitment root of the consensus state, which is used for key-value pair verification. 
- fn root(&self) -> &CommitmentRoot; - - /// Wrap into an `AnyConsensusState` - fn wrap_any(self) -> AnyConsensusState; -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -#[serde(tag = "type")] -pub enum AnyConsensusState { - Tendermint(consensus_state::ConsensusState), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockConsensusState), -} - -impl AnyConsensusState { - pub fn timestamp(&self) -> Timestamp { - match self { - Self::Tendermint(cs_state) => cs_state.timestamp.into(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.timestamp(), - } - } - - pub fn client_type(&self) -> ClientType { - match self { - AnyConsensusState::Tendermint(_cs) => ClientType::Tendermint, - - #[cfg(any(test, feature = "mocks"))] - AnyConsensusState::Mock(_cs) => ClientType::Mock, - } - } -} - -impl Protobuf for AnyConsensusState {} - -impl TryFrom for AnyConsensusState { - type Error = Error; - - fn try_from(value: Any) -> Result { - match value.type_url.as_str() { - "" => Err(Error::empty_consensus_state_response()), - - TENDERMINT_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Tendermint( - consensus_state::ConsensusState::decode_vec(&value.value) - .map_err(Error::decode_raw_client_state)?, - )), - - #[cfg(any(test, feature = "mocks"))] - MOCK_CONSENSUS_STATE_TYPE_URL => Ok(AnyConsensusState::Mock( - MockConsensusState::decode_vec(&value.value) - .map_err(Error::decode_raw_client_state)?, - )), - - _ => Err(Error::unknown_consensus_state_type(value.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyConsensusState) -> Self { - match value { - AnyConsensusState::Tendermint(value) => Any { - type_url: TENDERMINT_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyConsensusState::Tendermint`"), - }, - #[cfg(any(test, feature = "mocks"))] - AnyConsensusState::Mock(value) => Any { - type_url: MOCK_CONSENSUS_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - 
.expect("encoding to `Any` from `AnyConsensusState::Mock`"), - }, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize)] -pub struct AnyConsensusStateWithHeight { - pub height: Height, - pub consensus_state: AnyConsensusState, -} - -impl Protobuf for AnyConsensusStateWithHeight {} - -impl TryFrom for AnyConsensusStateWithHeight { - type Error = Error; - - fn try_from(value: ConsensusStateWithHeight) -> Result { - let state = value - .consensus_state - .map(AnyConsensusState::try_from) - .transpose()? - .ok_or_else(Error::empty_consensus_state_response)?; - - Ok(AnyConsensusStateWithHeight { - height: value.height.ok_or_else(Error::missing_height)?.into(), - consensus_state: state, - }) - } -} - -impl From for ConsensusStateWithHeight { - fn from(value: AnyConsensusStateWithHeight) -> Self { - ConsensusStateWithHeight { - height: Some(value.height.into()), - consensus_state: Some(value.consensus_state.into()), - } - } -} - -impl ConsensusState for AnyConsensusState { - type Error = Infallible; - - fn client_type(&self) -> ClientType { - self.client_type() - } - - fn root(&self) -> &CommitmentRoot { - match self { - Self::Tendermint(cs_state) => cs_state.root(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.root(), - } - } - - fn wrap_any(self) -> AnyConsensusState { - self - } +use crate::{ + core::{ics23_commitment::commitment::CommitmentRoot, ics24_host::identifier::ClientId}, + events::WithBlockDataType, + prelude::*, + timestamp::Timestamp, +}; +use core::{ + fmt::Debug, + marker::{Send, Sync}, +}; + +pub trait ConsensusState: Clone + Debug + Send + Sync { + type Error; + + /// Commitment root of the consensus state, which is used for key-value pair verification. + fn root(&self) -> &CommitmentRoot; + + /// Returns the timestamp of the state. 
+ fn timestamp(&self) -> Timestamp; + + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; } /// Query request for a single client event, identified by `event_id`, for `client_id`. #[derive(Clone, Debug)] pub struct QueryClientEventRequest { - pub height: crate::Height, - pub event_id: WithBlockDataType, - pub client_id: ClientId, - pub consensus_height: crate::Height, + pub height: crate::Height, + pub event_id: WithBlockDataType, + pub client_id: ClientId, + pub consensus_height: crate::Height, } diff --git a/modules/src/core/ics02_client/client_def.rs b/modules/src/core/ics02_client/client_def.rs index d0ced043c3..171cff9ada 100644 --- a/modules/src/core/ics02_client/client_def.rs +++ b/modules/src/core/ics02_client/client_def.rs @@ -1,700 +1,219 @@ -use ibc_proto::ibc::core::commitment::v1::MerkleProof; - -use crate::clients::ics07_tendermint::client_def::TendermintClient; -use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::{AnyHeader, Header}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +use crate::core::ics02_client::{client_consensus::ConsensusState, client_state::ClientState}; + 
+use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics02_client::{client_message::ClientMessage, error::Error}, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + packet::Sequence, + }, + ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes, CommitmentRoot}, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::context::ReaderContext, + }, + prelude::*, + Height, }; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::downcast; -use crate::prelude::*; -use crate::Height; - -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_def::MockClient; - -pub trait ClientDef: Clone { - type Header: Header; - type ClientState: ClientState; - type ConsensusState: ConsensusState; - - fn check_header_and_update_state( - &self, - ctx: &dyn ClientReader, - client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error>; - - /// TODO - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: MerkleProof, - proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error>; - - /// Verification functions as specified in: - /// - /// - /// Verify a `proof` that the consensus state of a given client (at height `consensus_height`) - /// matches the input `consensus_state`. The parameter `counterparty_height` represent the - /// height of the counterparty chain that this proof assumes (i.e., the height at which this - /// proof was computed). 
- #[allow(clippy::too_many_arguments)] - fn verify_client_consensus_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error>; - - /// Verify a `proof` that a connection state matches that of the input `connection_end`. - #[allow(clippy::too_many_arguments)] - fn verify_connection_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error>; - - /// Verify a `proof` that a channel state matches that of the input `channel_end`. - #[allow(clippy::too_many_arguments)] - fn verify_channel_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Error>; - - /// Verify the client state for this chain that it is stored on the counterparty chain. - #[allow(clippy::too_many_arguments)] - fn verify_client_full_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - expected_client_state: &AnyClientState, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has been commited. 
- #[allow(clippy::too_many_arguments)] - fn verify_packet_data( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has been commited. - #[allow(clippy::too_many_arguments)] - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack: AcknowledgementCommitment, - ) -> Result<(), Error>; - - /// Verify a `proof` that of the next_seq_received. - #[allow(clippy::too_many_arguments)] - fn verify_next_sequence_recv( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error>; - - /// Verify a `proof` that a packet has not been received. 
- #[allow(clippy::too_many_arguments)] - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error>; -} +use core::fmt::Debug; -#[derive(Clone, Debug, PartialEq, Eq)] -pub enum AnyClient { - Tendermint(TendermintClient), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockClient), +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum ConsensusUpdateResult { + Single(C::AnyConsensusState), + Batch(Vec<(Height, C::AnyConsensusState)>), } -impl AnyClient { - pub fn from_client_type(client_type: ClientType) -> AnyClient { - match client_type { - ClientType::Tendermint => Self::Tendermint(TendermintClient::default()), - - #[cfg(any(test, feature = "mocks"))] - ClientType::Mock => Self::Mock(MockClient), - } - } +impl ConsensusUpdateResult { + pub fn map_state(self, f: F) -> ConsensusUpdateResult + where + F: Fn(C::AnyConsensusState) -> D::AnyConsensusState, + { + match self { + ConsensusUpdateResult::Single(cs) => ConsensusUpdateResult::Single(f(cs)), + ConsensusUpdateResult::Batch(cs) => { + ConsensusUpdateResult::Batch(cs.into_iter().map(|(h, s)| (h, f(s))).collect()) + }, + } + } } -// ⚠️ Beware of the awful boilerplate below ⚠️ -impl ClientDef for AnyClient { - type Header = AnyHeader; - type ClientState = AnyClientState; - type ConsensusState = AnyConsensusState; - - /// Validates an incoming `header` against the latest consensus state of this client. 
- fn check_header_and_update_state( - &self, - ctx: &dyn ClientReader, - client_id: ClientId, - client_state: AnyClientState, - header: AnyHeader, - ) -> Result<(AnyClientState, AnyConsensusState), Error> { - match self { - Self::Tendermint(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Tendermint, - header => AnyHeader::Tendermint, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - let (new_state, new_consensus) = - client.check_header_and_update_state(ctx, client_id, client_state, header)?; - - Ok(( - AnyClientState::Tendermint(new_state), - AnyConsensusState::Tendermint(new_consensus), - )) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let (client_state, header) = downcast!( - client_state => AnyClientState::Mock, - header => AnyHeader::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - let (new_state, new_consensus) = - client.check_header_and_update_state(ctx, client_id, client_state, header)?; - - Ok(( - AnyClientState::Mock(new_state), - AnyConsensusState::Mock(new_consensus), - )) - } - } - } - - fn verify_client_consensus_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_client_consensus_state( - client_state, - height, - prefix, - proof, - root, - client_id, - consensus_height, - expected_consensus_state, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| 
Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_client_consensus_state( - client_state, - height, - prefix, - proof, - root, - client_id, - consensus_height, - expected_consensus_state, - ) - } - } - } - - fn verify_connection_state( - &self, - client_state: &AnyClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - connection_id: &ConnectionId, - expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!(client_state => AnyClientState::Tendermint) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_connection_state( - client_state, - height, - prefix, - proof, - root, - connection_id, - expected_connection_end, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!(client_state => AnyClientState::Mock) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_connection_state( - client_state, - height, - prefix, - proof, - root, - connection_id, - expected_connection_end, - ) - } - } - } - - fn verify_channel_state( - &self, - client_state: &AnyClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!(client_state => AnyClientState::Tendermint) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_channel_state( - client_state, - height, - prefix, - proof, - root, - port_id, - channel_id, - expected_channel_end, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!(client_state => AnyClientState::Mock) - .ok_or_else(|| 
Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_channel_state( - client_state, - height, - prefix, - proof, - root, - port_id, - channel_id, - expected_channel_end, - ) - } - } - } - - fn verify_client_full_state( - &self, - client_state: &Self::ClientState, - height: Height, - prefix: &CommitmentPrefix, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - client_id: &ClientId, - client_state_on_counterparty: &AnyClientState, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_client_full_state( - client_state, - height, - prefix, - proof, - root, - client_id, - client_state_on_counterparty, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_client_full_state( - client_state, - height, - prefix, - proof, - root, - client_id, - client_state_on_counterparty, - ) - } - } - } - fn verify_packet_data( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - commitment: PacketCommitment, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_data( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - commitment, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - 
client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_data( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - commitment, - ) - } - } - } - - fn verify_packet_acknowledgement( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_acknowledgement( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ack_commitment, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_acknowledgement( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ack_commitment, - ) - } - } - } - - fn verify_next_sequence_recv( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_next_sequence_recv( - ctx, - client_state, - height, - 
connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_next_sequence_recv( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - } - } - fn verify_packet_receipt_absence( - &self, - ctx: &dyn ChannelReader, - client_state: &Self::ClientState, - height: Height, - connection_end: &ConnectionEnd, - proof: &CommitmentProofBytes, - root: &CommitmentRoot, - port_id: &PortId, - channel_id: &ChannelId, - sequence: Sequence, - ) -> Result<(), Error> { - match self { - Self::Tendermint(client) => { - let client_state = downcast!( - client_state => AnyClientState::Tendermint - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Tendermint))?; - - client.verify_packet_receipt_absence( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let client_state = downcast!( - client_state => AnyClientState::Mock - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - client.verify_packet_receipt_absence( - ctx, - client_state, - height, - connection_end, - proof, - root, - port_id, - channel_id, - sequence, - ) - } - } - } - - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - proof_upgrade_client: MerkleProof, - proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error> { - match self { - Self::Tendermint(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Tendermint, - consensus_state => AnyConsensusState::Tendermint, - ) - .ok_or_else(|| 
Error::client_args_type_mismatch(ClientType::Tendermint))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok(( - AnyClientState::Tendermint(new_state), - AnyConsensusState::Tendermint(new_consensus), - )) - } - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(client) => { - let (client_state, consensus_state) = downcast!( - client_state => AnyClientState::Mock, - consensus_state => AnyConsensusState::Mock, - ) - .ok_or_else(|| Error::client_args_type_mismatch(ClientType::Mock))?; - - let (new_state, new_consensus) = client.verify_upgrade_and_update_state( - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - )?; - - Ok(( - AnyClientState::Mock(new_state), - AnyConsensusState::Mock(new_consensus), - )) - } - } - } +pub trait ClientDef: Clone { + type ClientMessage: ClientMessage; + type ClientState: ClientState + Eq; + type ConsensusState: ConsensusState + Eq; + + fn verify_client_message( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + client_msg: Self::ClientMessage, + ) -> Result<(), Error>; + + fn update_state( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + client_msg: Self::ClientMessage, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + client_msg: Self::ClientMessage, + ) -> Result; + + fn check_for_misbehaviour( + &self, + ctx: &Ctx, + client_id: ClientId, + client_state: Self::ClientState, + client_msg: Self::ClientMessage, + ) -> Result; + + fn verify_upgrade_and_update_state( + &self, + ctx: &Ctx, + client_id: ClientId, + old_client_state: &Self::ClientState, + upgrade_client_state: &Self::ClientState, + upgrade_consensus_state: &Self::ConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, 
+ ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error>; + + /// Verification functions as specified in: + /// + /// + /// Verify a `proof` that the consensus state of a given client (at height `consensus_height`) + /// matches the input `consensus_state`. The parameter `counterparty_height` represent the + /// height of the counterparty chain that this proof assumes (i.e., the height at which this + /// proof was computed). + #[allow(clippy::too_many_arguments)] + fn verify_client_consensus_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + consensus_height: Height, + expected_consensus_state: &Ctx::AnyConsensusState, + ) -> Result<(), Error>; + + /// Verify a `proof` that a connection state matches that of the input `connection_end`. + #[allow(clippy::too_many_arguments)] + fn verify_connection_state( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + connection_id: &ConnectionId, + expected_connection_end: &ConnectionEnd, + ) -> Result<(), Error>; + + /// Verify a `proof` that a channel state matches that of the input `channel_end`. + #[allow(clippy::too_many_arguments)] + fn verify_channel_state( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + expected_channel_end: &ChannelEnd, + ) -> Result<(), Error>; + + /// Verify the client state for this chain that it is stored on the counterparty chain. 
+ #[allow(clippy::too_many_arguments)] + fn verify_client_full_state( + &self, + ctx: &Ctx, + client_state: &Self::ClientState, + height: Height, + prefix: &CommitmentPrefix, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + client_id: &ClientId, + expected_client_state: &Ctx::AnyClientState, + ) -> Result<(), Error>; + + /// Verify a `proof` that a packet has been commited. + #[allow(clippy::too_many_arguments)] + fn verify_packet_data( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + commitment: PacketCommitment, + ) -> Result<(), Error>; + + /// Verify a `proof` that a packet has been commited. + #[allow(clippy::too_many_arguments)] + fn verify_packet_acknowledgement( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ack: AcknowledgementCommitment, + ) -> Result<(), Error>; + + /// Verify a `proof` that of the next_seq_received. + #[allow(clippy::too_many_arguments)] + fn verify_next_sequence_recv( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Error>; + + /// Verify a `proof` that a packet has not been received. 
+ #[allow(clippy::too_many_arguments)] + fn verify_packet_receipt_absence( + &self, + ctx: &Ctx, + client_id: &ClientId, + client_state: &Self::ClientState, + height: Height, + connection_end: &ConnectionEnd, + proof: &CommitmentProofBytes, + root: &CommitmentRoot, + port_id: &PortId, + channel_id: &ChannelId, + sequence: Sequence, + ) -> Result<(), Error>; } diff --git a/modules/src/core/ics02_client/client_message.rs b/modules/src/core/ics02_client/client_message.rs new file mode 100644 index 0000000000..cb5bcd4cdf --- /dev/null +++ b/modules/src/core/ics02_client/client_message.rs @@ -0,0 +1,20 @@ +use alloc::vec::Vec; + +/// Abstract of consensus state update information +pub trait ClientMessage: Clone + core::fmt::Debug + Send + Sync { + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; +} diff --git a/modules/src/core/ics02_client/client_state.rs b/modules/src/core/ics02_client/client_state.rs index 4c086b6e6a..75a4b2a2e8 100644 --- a/modules/src/core/ics02_client/client_state.rs +++ b/modules/src/core/ics02_client/client_state.rs @@ -1,308 +1,74 @@ -use core::marker::{Send, Sync}; -use core::time::Duration; - -use ibc_proto::google::protobuf::Any; -use serde::{Deserialize, Serialize}; -use tendermint_proto::Protobuf; - -use ibc_proto::ibc::core::client::v1::IdentifiedClientState; - -use crate::clients::ics07_tendermint::client_state; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::trust_threshold::TrustThreshold; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChainId, ClientId}; -#[cfg(any(test, feature = "mocks"))] -use crate::mock::client_state::MockClientState; -use crate::prelude::*; -use crate::Height; - -pub const 
TENDERMINT_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.ClientState"; -pub const MOCK_CLIENT_STATE_TYPE_URL: &str = "/ibc.mock.ClientState"; - -pub trait ClientState: Clone + core::fmt::Debug + Send + Sync { - /// Client-specific options for upgrading the client - type UpgradeOptions; - - /// Return the chain identifier which this client is serving (i.e., the client is verifying - /// consensus states from this chain). - fn chain_id(&self) -> ChainId; - - /// Type of client associated with this state (eg. Tendermint) - fn client_type(&self) -> ClientType; - - /// Latest height of consensus state - fn latest_height(&self) -> Height; - - /// Freeze status of the client - fn is_frozen(&self) -> bool { - self.frozen_height().is_some() - } - - /// Frozen height of the client - fn frozen_height(&self) -> Option; - - /// Helper function to verify the upgrade client procedure. - /// Resets all fields except the blockchain-specific ones, - /// and updates the given fields. - fn upgrade( - self, - upgrade_height: Height, - upgrade_options: Self::UpgradeOptions, - chain_id: ChainId, - ) -> Self; - - /// Wrap into an `AnyClientState` - fn wrap_any(self) -> AnyClientState; -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum AnyUpgradeOptions { - Tendermint(client_state::UpgradeOptions), - - #[cfg(any(test, feature = "mocks"))] - Mock(()), -} - -impl AnyUpgradeOptions { - fn into_tendermint(self) -> client_state::UpgradeOptions { - match self { - Self::Tendermint(options) => options, - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(_) => { - panic!("cannot downcast AnyUpgradeOptions::Mock to Tendermint::UpgradeOptions") - } - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum AnyClientState { - Tendermint(client_state::ClientState), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockClientState), -} - -impl AnyClientState { - pub fn 
latest_height(&self) -> Height { - match self { - Self::Tendermint(tm_state) => tm_state.latest_height(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.latest_height(), - } - } - - pub fn frozen_height(&self) -> Option { - match self { - Self::Tendermint(tm_state) => tm_state.frozen_height(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(mock_state) => mock_state.frozen_height(), - } - } - - pub fn trust_threshold(&self) -> Option { - match self { - AnyClientState::Tendermint(state) => Some(state.trust_level), - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(_) => None, - } - } - - pub fn max_clock_drift(&self) -> Duration { - match self { - AnyClientState::Tendermint(state) => state.max_clock_drift, - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(_) => Duration::new(0, 0), - } - } - - pub fn client_type(&self) -> ClientType { - match self { - Self::Tendermint(state) => state.client_type(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(state) => state.client_type(), - } - } - - pub fn refresh_period(&self) -> Option { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.refresh_time(), - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.refresh_time(), - } - } - - pub fn expired(&self, elapsed_since_latest: Duration) -> bool { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.expired(elapsed_since_latest), - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.expired(elapsed_since_latest), - } - } -} - -impl Protobuf for AnyClientState {} - -impl TryFrom for AnyClientState { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - "" => Err(Error::empty_client_state_response()), - - TENDERMINT_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Tendermint( - client_state::ClientState::decode_vec(&raw.value) - .map_err(Error::decode_raw_client_state)?, - 
)), - - #[cfg(any(test, feature = "mocks"))] - MOCK_CLIENT_STATE_TYPE_URL => Ok(AnyClientState::Mock( - MockClientState::decode_vec(&raw.value).map_err(Error::decode_raw_client_state)?, - )), - - _ => Err(Error::unknown_client_state_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyClientState) -> Self { - match value { - AnyClientState::Tendermint(value) => Any { - type_url: TENDERMINT_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Tendermint`"), - }, - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(value) => Any { - type_url: MOCK_CLIENT_STATE_TYPE_URL.to_string(), - value: value - .encode_vec() - .expect("encoding to `Any` from `AnyClientState::Mock`"), - }, - } - } +use crate::{ + core::{ics02_client::client_def::ClientDef, ics24_host::identifier::ChainId}, + prelude::*, + Height, +}; +use core::{ + fmt::Debug, + marker::{Send, Sync}, + time::Duration, +}; +use alloc::string::String; + +pub trait ClientState: Clone + Debug + Send + Sync { + /// Client-specific options for upgrading the client + type UpgradeOptions; + type ClientDef: ClientDef; + + /// Return the chain identifier which this client is serving (i.e., the client is verifying + /// consensus states from this chain). + fn chain_id(&self) -> ChainId; + + /// Type of client associated with this state (eg. Tendermint) + fn client_def(&self) -> Self::ClientDef; + + /// Returns one of the prefixes that should be present in any client identifiers. + /// The prefix is deterministic for a given chain type, hence all clients for a Tendermint-type + /// chain, for example, will have the prefix '07-tendermint'. 
+ fn client_type(&self) -> ClientType; + + /// Latest height of consensus state + fn latest_height(&self) -> Height; + + /// Freeze status of the client + fn is_frozen(&self) -> bool { + self.frozen_height().is_some() + } + + /// Frozen height of the client + fn frozen_height(&self) -> Option; + + /// Helper function to verify the upgrade client procedure. + /// Resets all fields except the blockchain-specific ones, + /// and updates the given fields. + fn upgrade( + self, + upgrade_height: Height, + upgrade_options: Self::UpgradeOptions, + chain_id: ChainId, + ) -> Self; + + /// Helper function to verify the upgrade client procedure. + fn expired(&self, elapsed: Duration) -> bool; + + /// Performs downcast of the client state from an "AnyClientState" type to T, otherwise + /// panics. Downcast from `T` to `T` is always successful. + fn downcast(self) -> Option + where + Self: 'static, + { + ::downcast_ref(&self).cloned() + } + + fn wrap(sub_state: &dyn core::any::Any) -> Option + where + Self: 'static, + { + sub_state.downcast_ref::().cloned() + } + + fn encode_to_vec(&self) -> Vec; } -impl ClientState for AnyClientState { - type UpgradeOptions = AnyUpgradeOptions; - - fn chain_id(&self) -> ChainId { - match self { - AnyClientState::Tendermint(tm_state) => tm_state.chain_id(), - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => mock_state.chain_id(), - } - } - - fn client_type(&self) -> ClientType { - self.client_type() - } - - fn latest_height(&self) -> Height { - self.latest_height() - } - - fn frozen_height(&self) -> Option { - self.frozen_height() - } - - fn upgrade( - self, - upgrade_height: Height, - upgrade_options: Self::UpgradeOptions, - chain_id: ChainId, - ) -> Self { - match self { - AnyClientState::Tendermint(tm_state) => tm_state - .upgrade(upgrade_height, upgrade_options.into_tendermint(), chain_id) - .wrap_any(), - - #[cfg(any(test, feature = "mocks"))] - AnyClientState::Mock(mock_state) => { - 
mock_state.upgrade(upgrade_height, (), chain_id).wrap_any() - } - } - } - - fn wrap_any(self) -> AnyClientState { - self - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(tag = "type")] -pub struct IdentifiedAnyClientState { - pub client_id: ClientId, - pub client_state: AnyClientState, -} - -impl IdentifiedAnyClientState { - pub fn new(client_id: ClientId, client_state: AnyClientState) -> Self { - IdentifiedAnyClientState { - client_id, - client_state, - } - } -} - -impl Protobuf for IdentifiedAnyClientState {} - -impl TryFrom for IdentifiedAnyClientState { - type Error = Error; - - fn try_from(raw: IdentifiedClientState) -> Result { - Ok(IdentifiedAnyClientState { - client_id: raw.client_id.parse().map_err(|e: ValidationError| { - Error::invalid_raw_client_id(raw.client_id.clone(), e) - })?, - client_state: raw - .client_state - .ok_or_else(Error::missing_raw_client_state)? - .try_into()?, - }) - } -} - -impl From for IdentifiedClientState { - fn from(value: IdentifiedAnyClientState) -> Self { - IdentifiedClientState { - client_id: value.client_id.to_string(), - client_state: Some(value.client_state.into()), - } - } -} - -#[cfg(test)] -mod tests { - - use ibc_proto::google::protobuf::Any; - use test_log::test; - - use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_state::AnyClientState; - - #[test] - fn any_client_state_serialization() { - let tm_client_state = get_dummy_tendermint_client_state(get_dummy_tendermint_header()); - - let raw: Any = tm_client_state.clone().into(); - let tm_client_state_back = AnyClientState::try_from(raw).unwrap(); - assert_eq!(tm_client_state, tm_client_state_back); - } -} +/// Type of the client, depending on the specific consensus algorithm. 
+pub type ClientType = String; diff --git a/modules/src/core/ics02_client/client_type.rs b/modules/src/core/ics02_client/client_type.rs index 78973c800e..8b13789179 100644 --- a/modules/src/core/ics02_client/client_type.rs +++ b/modules/src/core/ics02_client/client_type.rs @@ -1,112 +1 @@ -use crate::prelude::*; -use core::fmt; -use serde_derive::{Deserialize, Serialize}; -use super::error::Error; - -/// Type of the client, depending on the specific consensus algorithm. -#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] -pub enum ClientType { - Tendermint = 1, - - #[cfg(any(test, feature = "mocks"))] - Mock = 9999, -} - -impl ClientType { - const TENDERMINT_STR: &'static str = "07-tendermint"; - - #[cfg_attr(not(test), allow(dead_code))] - const MOCK_STR: &'static str = "9999-mock"; - - /// Yields the identifier of this client type as a string - pub fn as_str(&self) -> &'static str { - match self { - Self::Tendermint => Self::TENDERMINT_STR, - - #[cfg(any(test, feature = "mocks"))] - Self::Mock => Self::MOCK_STR, - } - } -} - -impl fmt::Display for ClientType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ClientType({})", self.as_str()) - } -} - -impl core::str::FromStr for ClientType { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - Self::TENDERMINT_STR => Ok(Self::Tendermint), - - #[cfg(any(test, feature = "mocks"))] - Self::MOCK_STR => Ok(Self::Mock), - - _ => Err(Error::unknown_client_type(s.to_string())), - } - } -} - -#[cfg(test)] -mod tests { - use core::str::FromStr; - use test_log::test; - - use super::ClientType; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - - #[test] - fn parse_tendermint_client_type() { - let client_type = ClientType::from_str("07-tendermint"); - - match client_type { - Ok(ClientType::Tendermint) => (), - _ => panic!("parse failed"), - } - } - - #[test] - fn parse_mock_client_type() { - let client_type = 
ClientType::from_str("9999-mock"); - - match client_type { - Ok(ClientType::Mock) => (), - _ => panic!("parse failed"), - } - } - - #[test] - fn parse_unknown_client_type() { - let client_type_str = "some-random-client-type"; - let result = ClientType::from_str(client_type_str); - - match result { - Err(Error(ErrorDetail::UnknownClientType(e), _)) => { - assert_eq!(&e.client_type, client_type_str) - } - _ => { - panic!("Expected ClientType::from_str to fail with UnknownClientType, instead got",) - } - } - } - - #[test] - fn parse_mock_as_string_result() { - let client_type = ClientType::Mock; - let type_string = client_type.as_str(); - let client_type_from_str = ClientType::from_str(type_string).unwrap(); - assert_eq!(client_type_from_str, client_type); - } - - #[test] - fn parse_tendermint_as_string_result() { - let client_type = ClientType::Tendermint; - let type_string = client_type.as_str(); - let client_type_from_str = ClientType::from_str(type_string).unwrap(); - assert_eq!(client_type_from_str, client_type); - } -} diff --git a/modules/src/core/ics02_client/context.rs b/modules/src/core/ics02_client/context.rs index b8c0ce4741..c7de0fe15f 100644 --- a/modules/src/core/ics02_client/context.rs +++ b/modules/src/core/ics02_client/context.rs @@ -2,184 +2,259 @@ //! that any host chain must implement to be able to process any `ClientMsg`. See //! "ADR 003: IBC protocol implementation" for more details. 
-use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::{Error, ErrorDetail}; -use crate::core::ics02_client::handler::ClientResult::{self, Create, Update, Upgrade}; -use crate::core::ics24_host::identifier::ClientId; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, + client_message::ClientMessage, + client_state::{ClientState, ClientType}, + error::{Error, ErrorDetail}, + handler::ClientResult::{self, Create, Update, Upgrade}, + }, + ics24_host::identifier::ClientId, + }, + timestamp::Timestamp, + Height, +}; +use alloc::{string::String, vec::Vec}; +use core::fmt::Debug; /// Defines the read-only part of ICS2 (client functions) context. -pub trait ClientReader { - fn client_type(&self, client_id: &ClientId) -> Result; - fn client_state(&self, client_id: &ClientId) -> Result; - - /// Retrieve the consensus state for the given client ID at the specified - /// height. - /// - /// Returns an error if no such state exists. - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - - /// Similar to `consensus_state`, attempt to retrieve the consensus state, - /// but return `None` if no state exists at the given height. - fn maybe_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error> { - match self.consensus_state(client_id, height) { - Ok(cs) => Ok(Some(cs)), - Err(e) => match e.detail() { - ErrorDetail::ConsensusStateNotFound(_) => Ok(None), - _ => Err(e), - }, - } - } - - /// Search for the lowest consensus state higher than `height`. - fn next_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error>; - - /// Search for the highest consensus state lower than `height`. 
- fn prev_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Error>; - - /// Returns the current height of the local chain. - fn host_height(&self) -> Height; - - /// Returns the current timestamp of the local chain. - fn host_timestamp(&self) -> Timestamp { - let pending_consensus_state = self - .pending_host_consensus_state() - .expect("host must have pending consensus state"); - pending_consensus_state.timestamp() - } - - /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; - - /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result; - - /// Returns a natural number, counting how many clients have been created thus far. - /// The value of this counter should increase only via method `ClientKeeper::increase_client_counter`. - fn client_counter(&self) -> Result; +pub trait ClientReader: ClientKeeper { + fn client_type(&self, client_id: &ClientId) -> Result; + fn client_state(&self, client_id: &ClientId) -> Result; + + /// Retrieve the consensus state for the given client ID at the specified + /// height. + /// + /// Returns an error if no such state exists. + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result; + + /// This should return the host type. + fn host_client_type(&self) -> String; + + /// Similar to `consensus_state`, attempt to retrieve the consensus state, + /// but return `None` if no state exists at the given height. + fn maybe_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error> { + match self.consensus_state(client_id, height) { + Ok(cs) => Ok(Some(cs)), + Err(e) => match e.detail() { + ErrorDetail::ConsensusStateNotFound(_) => Ok(None), + _ => Err(e), + }, + } + } + + /// Search for the lowest consensus state higher than `height`. 
+ fn next_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error>; + + /// Search for the highest consensus state lower than `height`. + fn prev_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Error>; + + /// Returns the current height of the local chain. + fn host_height(&self) -> Height; + + /// Returns the current timestamp of the local chain. + fn host_timestamp(&self) -> Timestamp; + + /// Returns the `ConsensusState` of the host (local) chain at a specific height. + /// If this is fetched from a proof whose origin is off-chain, it should ideally be verified + /// first. + fn host_consensus_state( + &self, + height: Height, + proof: Option>, + ) -> Result; + + /// Returns a natural number, counting how many clients have been created thus far. + /// The value of this counter should increase only via method + /// `ClientKeeper::increase_client_counter`. + fn client_counter(&self) -> Result; +} + +pub trait ClientTypes: 'static { + type AnyClientMessage: ClientMessage; + type AnyClientState: ClientState + Eq; + type AnyConsensusState: ConsensusState + Eq + 'static; + + /// Client definition type (used for verification) + type ClientDef: ClientDef< + ClientMessage = Self::AnyClientMessage, + ClientState = Self::AnyClientState, + ConsensusState = Self::AnyConsensusState, + >; } /// Defines the write-only part of ICS2 (client functions) context. 
-pub trait ClientKeeper { - fn store_client_result(&mut self, handler_res: ClientResult) -> Result<(), Error> { - match handler_res { - Create(res) => { - let client_id = res.client_id.clone(); - - self.store_client_type(client_id.clone(), res.client_type)?; - self.store_client_state(client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - client_id, - res.client_state.latest_height(), - res.consensus_state, - )?; - self.increase_client_counter(); - self.store_update_time( - res.client_id.clone(), - res.client_state.latest_height(), - res.processed_time, - )?; - self.store_update_height( - res.client_id, - res.client_state.latest_height(), - res.processed_height, - )?; - Ok(()) - } - Update(res) => { - self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - res.consensus_state, - )?; - self.store_update_time( - res.client_id.clone(), - res.client_state.latest_height(), - res.processed_time, - )?; - self.store_update_height( - res.client_id, - res.client_state.latest_height(), - res.processed_height, - )?; - Ok(()) - } - Upgrade(res) => { - self.store_client_state(res.client_id.clone(), res.client_state.clone())?; - self.store_consensus_state( - res.client_id.clone(), - res.client_state.latest_height(), - res.consensus_state, - )?; - Ok(()) - } - } - } - - /// Called upon successful client creation - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Error>; - - /// Called upon successful client creation and update - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Error>; - - /// Called upon successful client creation and update - fn store_consensus_state( - &mut self, - client_id: ClientId, - height: Height, - consensus_state: AnyConsensusState, - ) -> Result<(), Error>; - - /// Called upon client creation. 
- /// Increases the counter which keeps track of how many clients have been created. - /// Should never fail. - fn increase_client_counter(&mut self); - - /// Called upon successful client update. - /// Implementations are expected to use this to record the specified time as the time at which - /// this update (or header) was processed. - fn store_update_time( - &mut self, - client_id: ClientId, - height: Height, - timestamp: Timestamp, - ) -> Result<(), Error>; - - /// Called upon successful client update. - /// Implementations are expected to use this to record the specified height as the height at - /// at which this update (or header) was processed. - fn store_update_height( - &mut self, - client_id: ClientId, - height: Height, - host_height: Height, - ) -> Result<(), Error>; +pub trait ClientKeeper: ClientTypes +where + Self: Clone + Debug + Eq, +{ + fn store_client_result(&mut self, handler_res: ClientResult) -> Result<(), Error> { + match handler_res { + Create(res) => { + let client_id = res.client_id.clone(); + + self.store_client_type(client_id.clone(), res.client_type)?; + self.store_client_state(client_id.clone(), res.client_state.clone())?; + self.store_consensus_state( + client_id, + res.client_state.latest_height(), + res.consensus_state, + )?; + self.increase_client_counter(); + self.store_update_time( + res.client_id.clone(), + res.client_state.latest_height(), + res.processed_time, + )?; + self.store_update_height( + res.client_id, + res.client_state.latest_height(), + res.processed_height, + )?; + Ok(()) + }, + Update(res) => { + self.store_client_state(res.client_id.clone(), res.client_state.clone())?; + match res.consensus_state { + None => {}, + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + + self.store_update_time( + res.client_id.clone(), + res.client_state.latest_height(), + 
res.processed_time, + )?; + self.store_update_height( + res.client_id, + res.client_state.latest_height(), + res.processed_height, + )?; + }, + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + self.store_update_time( + res.client_id.clone(), + height, + res.processed_time, + )?; + self.store_update_height( + res.client_id.clone(), + height, + res.processed_height, + )?; + } + }, + }, + } + Ok(()) + }, + Upgrade(res) => { + self.store_client_state(res.client_id.clone(), res.client_state.clone())?; + match res.consensus_state { + None => {}, + Some(cs_state_update) => match cs_state_update { + ConsensusUpdateResult::Single(cs_state) => { + self.store_consensus_state( + res.client_id.clone(), + res.client_state.latest_height(), + cs_state, + )?; + }, + ConsensusUpdateResult::Batch(cs_states) => { + for (height, cs_state) in cs_states { + self.store_consensus_state( + res.client_id.clone(), + height, + cs_state, + )?; + } + }, + }, + } + Ok(()) + }, + } + } + + /// Called upon successful client creation + fn store_client_type( + &mut self, + client_id: ClientId, + client_type: ClientType, + ) -> Result<(), Error>; + + /// Called upon successful client creation and update + fn store_client_state( + &mut self, + client_id: ClientId, + client_state: Self::AnyClientState, + ) -> Result<(), Error>; + + /// Called upon successful client creation and update + fn store_consensus_state( + &mut self, + client_id: ClientId, + height: Height, + consensus_state: Self::AnyConsensusState, + ) -> Result<(), Error>; + + /// Called upon client creation. + /// Increases the counter which keeps track of how many clients have been created. + /// Should never fail. + fn increase_client_counter(&mut self); + + /// Called upon successful client update. 
+ /// Implementations are expected to use this to record the specified time as the time at which + /// this update (or header) was processed. + fn store_update_time( + &mut self, + client_id: ClientId, + height: Height, + timestamp: Timestamp, + ) -> Result<(), Error>; + + /// Called upon successful client update. + /// Implementations are expected to use this to record the specified height as the height at + /// at which this update (or header) was processed. + fn store_update_height( + &mut self, + client_id: ClientId, + height: Height, + host_height: Height, + ) -> Result<(), Error>; + + /// validates the client parameters for a client of the running chain + /// This function is only used to validate the client state the counterparty stores for this + /// chain + fn validate_self_client(&self, client_state: &Self::AnyClientState) -> Result<(), Error>; } diff --git a/modules/src/core/ics02_client/error.rs b/modules/src/core/ics02_client/error.rs index fee8717689..350d5a9b16 100644 --- a/modules/src/core/ics02_client/error.rs +++ b/modules/src/core/ics02_client/error.rs @@ -4,279 +4,282 @@ use flex_error::{define_error, TraceError}; use tendermint::Error as TendermintError; use tendermint_proto::Error as TendermintProtoError; -use crate::clients::ics07_tendermint::error::Error as Ics07Error; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::height::HeightError; -use crate::core::ics23_commitment::error::Error as Ics23Error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::SignerError; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics02_client::{client_state::ClientType, height::HeightError}, + ics23_commitment::error::Error as Ics23Error, + ics24_host::{error::ValidationError, identifier::ClientId}, + }, + signer::SignerError, + timestamp::Timestamp, + Height, +}; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknownClientType - { client_type: String } - | e | { format_args!("unknown client type: {0}", e.client_type) }, - - ClientIdentifierConstructor - { client_type: ClientType, counter: u64 } - [ ValidationError ] - | e | { - format_args!("Client identifier constructor failed for type {0} with counter {1}", - e.client_type, e.counter) - }, - - ClientAlreadyExists - { client_id: ClientId } - | e | { format_args!("client already exists: {0}", e.client_id) }, - - ClientNotFound - { client_id: ClientId } - | e | { format_args!("client not found: {0}", e.client_id) }, - - ClientFrozen - { client_id: ClientId } - | e | { format_args!("client is frozen: {0}", e.client_id) }, - - ConsensusStateNotFound - { client_id: ClientId, height: Height } - | e | { - format_args!("consensus state not found at: {0} at height {1}", - e.client_id, e.height) - }, - - ImplementationSpecific - | _ | { "implementation specific error" }, - - HeaderVerificationFailure - { reason: String } - | e | { format_args!("header verification failed with reason: {}", e.reason) }, - - InvalidTrustThreshold - { numerator: u64, denominator: u64 } - | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - FailedTrustThresholdConversion - { numerator: u64, denominator: u64 } - [ TendermintError ] - | e | { format_args!("failed to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, - - UnknownClientStateType - { client_state_type: String } - | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, - - EmptyClientStateResponse - | _ | { "the client state was not found" }, - - EmptyPrefix - | _ | { "empty prefix" }, - - UnknownConsensusStateType - { consensus_state_type: String } - | e | { - format_args!("unknown client consensus state type: {0}", - e.consensus_state_type) - }, - - EmptyConsensusStateResponse - | _ | { "the client consensus state 
was not found" }, - - UnknownHeaderType - { header_type: String } - | e | { - format_args!("unknown header type: {0}", - e.header_type) - }, - - UnknownMisbehaviourType - { misbehavior_type: String } - | e | { - format_args!("unknown misbehaviour type: {0}", - e.misbehavior_type) - }, - - InvalidRawClientId - { client_id: String } - [ ValidationError ] - | e | { - format_args!("invalid raw client identifier {0}", - e.client_id) - }, - - DecodeRawClientState - [ TraceError ] - | _ | { "error decoding raw client state" }, - - MissingRawClientState - | _ | { "missing raw client state" }, - - InvalidRawConsensusState - [ TraceError ] - | _ | { "invalid raw client consensus state" }, - - MissingRawConsensusState - | _ | { "missing raw client consensus state" }, - - InvalidMsgUpdateClientId - [ ValidationError ] - | _ | { "invalid client id in the update client message" }, - - Decode - [ TraceError ] - | _ | { "decode error" }, - - MissingHeight - | _ | { "invalid raw client consensus state: the height field is missing" }, - - InvalidClientIdentifier - [ ValidationError ] - | _ | { "invalid client identifier" }, - - InvalidRawHeader - [ TraceError ] - | _ | { "invalid raw header" }, - - MissingRawHeader - | _ | { "missing raw header" }, - - DecodeRawMisbehaviour - [ TraceError ] - | _ | { "invalid raw misbehaviour" }, - - InvalidRawMisbehaviour - [ ValidationError ] - | _ | { "invalid raw misbehaviour" }, - - MissingRawMisbehaviour - | _ | { "missing raw misbehaviour" }, - - InvalidStringAsHeight - { value: String } - [ HeightError ] - | e | { format_args!("String {0} cannnot be converted to height", e.value) }, - - InvalidHeightResult - | _ | { "height cannot end up zero or negative" }, - - InvalidAddress - | _ | { "invalid address" }, - - InvalidUpgradeClientProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded client state" }, - - InvalidUpgradeConsensusStateProof - [ Ics23Error ] - | _ | { "invalid proof for the upgraded consensus state" }, - - 
InvalidCommitmentProof - [ Ics23Error ] - | _ | { "invalid commitment proof bytes" }, - - Tendermint - [ Ics07Error ] - | _ | { "tendermint error" }, - - InvalidPacketTimestamp - [ crate::timestamp::ParseTimestampError ] - | _ | { "invalid packet timeout timestamp value" }, - - ClientArgsTypeMismatch - { client_type: ClientType } - | e | { - format_args!("mismatch between client and arguments types, expected: {0:?}", - e.client_type) - }, - - InsufficientVotingPower - { reason: String } - | e | { - format_args!("Insufficient overlap {}", e.reason) - }, - - RawClientAndConsensusStateTypesMismatch - { - state_type: ClientType, - consensus_type: ClientType, - } - | e | { - format_args!("mismatch in raw client consensus state {} with expected state {}", - e.state_type, e.consensus_type) - }, - - LowHeaderHeight - { - header_height: Height, - latest_height: Height - } - | e | { - format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", - e.header_height, e.latest_height) - }, - - LowUpgradeHeight - { - upgraded_height: Height, - client_height: Height, - } - | e | { - format_args!("upgraded client height {} must be at greater than current client height {}", - e.upgraded_height, e.client_height) - }, - - InvalidConsensusStateTimestamp - { - time1: Timestamp, - time2: Timestamp, - } - | e | { - format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) - }, - - HeaderNotWithinTrustPeriod - { - latest_time:Timestamp, - update_time: Timestamp, - } - | e | { - format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) - }, - - TendermintHandlerError - [ Ics07Error ] - | _ | { format_args!("Tendermint-specific handler error") }, - - MissingLocalConsensusState - { height: Height } - | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, - - InvalidConnectionEnd - [ TraceError] - | _ | { "invalid connection end" }, - - 
InvalidChannelEnd - [ TraceError] - | _ | { "invalid channel end" }, - - InvalidAnyClientState - [ TraceError] - | _ | { "invalid any client state" }, - - InvalidAnyConsensusState - [ TraceError ] - | _ | { "invalid any client consensus state" }, - - Signer - [ SignerError ] - | _ | { "failed to parse signer" }, - } -} - -impl From for Error { - fn from(e: Ics07Error) -> Error { - Error::tendermint_handler_error(e) - } + #[derive(Debug, PartialEq, Eq)] + Error { + ClientError + { client_type: String, inner: String } + |e| { format_args!("client '{}' error: {}", e.client_type, e.inner) }, + + UnknownClientType + { client_type: String } + | e | { format_args!("unknown client type: {0}", e.client_type) }, + + ClientIdentifierConstructor + { client_type: String, counter: u64 } + [ ValidationError ] + | e | { + format_args!("Client identifier constructor failed for type {0} with counter {1}", + e.client_type, e.counter) + }, + + ClientAlreadyExists + { client_id: ClientId } + | e | { format_args!("client already exists: {0}", e.client_id) }, + + ClientNotFound + { client_id: ClientId } + | e | { format_args!("client not found: {0}", e.client_id) }, + + ClientFrozen + { client_id: ClientId } + | e | { format_args!("client is frozen: {0}", e.client_id) }, + + ConsensusStateNotFound + { client_id: ClientId, height: Height } + | e | { + format_args!("consensus state not found at: {0} at height {1}", + e.client_id, e.height) + }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + + HeaderVerificationFailure + { reason: String } + | e | { format_args!("header verification failed with reason: {}", e.reason) }, + + InvalidTrustThreshold + { numerator: u64, denominator: u64 } + | e | { format_args!("failed to build trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + FailedTrustThresholdConversion + { numerator: u64, denominator: u64 } + [ TendermintError ] + | e | { format_args!("failed 
to build Tendermint domain type trust threshold from fraction: {}/{}", e.numerator, e.denominator) }, + + UnknownClientStateType + { client_state_type: String } + | e | { format_args!("unknown client state type: {0}", e.client_state_type) }, + + EmptyClientStateResponse + | _ | { "the client state was not found" }, + + EmptyPrefix + | _ | { "empty prefix" }, + + UnknownConsensusStateType + { consensus_state_type: String } + | e | { + format_args!("unknown client consensus state type: {0}", + e.consensus_state_type) + }, + + EmptyConsensusStateResponse + | _ | { "the client consensus state was not found" }, + + UnknownHeaderType + { header_type: String } + | e | { + format_args!("unknown header type: {0}", + e.header_type) + }, + + UnknownMisbehaviourType + { misbehavior_type: String } + | e | { + format_args!("unknown misbehaviour type: {0}", + e.misbehavior_type) + }, + + InvalidRawClientId + { client_id: String } + [ ValidationError ] + | e | { + format_args!("invalid raw client identifier {0}", + e.client_id) + }, + + DecodeRawClientState + [ TraceError ] + | _ | { "error decoding raw client state" }, + + DecodeRawHeader + [ TraceError ] + | _ | { "error decoding raw header" }, + + DecodeRawConsensusState + [ TraceError ] + | _ | { "error decoding raw header" }, + + MissingRawClientState + | _ | { "missing raw client state" }, + + InvalidRawConsensusState + [ TraceError ] + | _ | { "invalid raw client consensus state" }, + + MissingRawConsensusState + | _ | { "missing raw client consensus state" }, + + InvalidMsgUpdateClientId + [ ValidationError ] + | _ | { "invalid client id in the update client message" }, + + Decode + [ TraceError ] + | _ | { "decode error" }, + + MissingHeight + | _ | { "invalid raw client consensus state: the height field is missing" }, + + InvalidClientIdentifier + [ ValidationError ] + | _ | { "invalid client identifier" }, + + InvalidRawHeader + [ TraceError ] + | _ | { "invalid raw header" }, + + MissingRawHeader + | _ | { "missing raw 
header" }, + + MissingRawClientMessage + | _ | { "missing raw client message" }, + + DecodeRawMisbehaviour + [ TraceError ] + | _ | { "invalid raw misbehaviour" }, + + InvalidRawMisbehaviour + [ ValidationError ] + | _ | { "invalid raw misbehaviour" }, + + MissingRawMisbehaviour + | _ | { "missing raw misbehaviour" }, + + InvalidStringAsHeight + { value: String } + [ HeightError ] + | e | { format_args!("String {0} cannnot be converted to height", e.value) }, + + InvalidHeightResult + | _ | { "height cannot end up zero or negative" }, + + InvalidAddress + | _ | { "invalid address" }, + + InvalidUpgradeClientProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded client state" }, + + InvalidUpgradeConsensusStateProof + [ Ics23Error ] + | _ | { "invalid proof for the upgraded consensus state" }, + + InvalidCommitmentProof + [ Ics23Error ] + | _ | { "invalid commitment proof bytes" }, + + InvalidPacketTimestamp + [ crate::timestamp::ParseTimestampError ] + | _ | { "invalid packet timeout timestamp value" }, + + ClientArgsTypeMismatch + { client_type: String } + | e | { + format_args!("mismatch between client and arguments types, expected: {0:?}", + e.client_type) + }, + + InsufficientVotingPower + { reason: String } + | e | { + format_args!("Insufficient overlap {}", e.reason) + }, + + RawClientAndConsensusStateTypesMismatch + { + state_type: ClientType, + consensus_type: ClientType, + } + | e | { + format_args!("mismatch in raw client consensus state {} with expected state {}", + e.state_type, e.consensus_type) + }, + + LowHeaderHeight + { + header_height: Height, + latest_height: Height + } + | e | { + format!("received header height ({:?}) is lower than (or equal to) client latest height ({:?})", + e.header_height, e.latest_height) + }, + + LowUpgradeHeight + { + upgraded_height: Height, + client_height: Height, + } + | e | { + format_args!("upgraded client height {} must be at greater than current client height {}", + e.upgraded_height, e.client_height) + 
}, + + InvalidConsensusStateTimestamp + { + time1: Timestamp, + time2: Timestamp, + } + | e | { + format_args!("timestamp is invalid or missing, timestamp={0}, now={1}", e.time1, e.time2) + }, + + HeaderNotWithinTrustPeriod + { + latest_time:Timestamp, + update_time: Timestamp, + } + | e | { + format_args!("header not withing trusting period: expires_at={0} now={1}", e.latest_time, e.update_time) + }, + + MissingLocalConsensusState + { height: Height } + | e | { format_args!("the local consensus state could not be retrieved for height {}", e.height) }, + + InvalidConnectionEnd + [ TraceError] + | _ | { "invalid connection end" }, + + InvalidChannelEnd + [ TraceError] + | _ | { "invalid channel end" }, + + InvalidAnyClientState + [ TraceError] + | _ | { "invalid any client state" }, + + InvalidAnyConsensusState + [ TraceError ] + | _ | { "invalid any client consensus state" }, + + Signer + [ SignerError ] + | _ | { "failed to parse signer" }, + } } diff --git a/modules/src/core/ics02_client/events.rs b/modules/src/core/ics02_client/events.rs index f858a7549f..6e5b41273f 100644 --- a/modules/src/core/ics02_client/events.rs +++ b/modules/src/core/ics02_client/events.rs @@ -1,16 +1,12 @@ //! Types for the IBC events emitted from Tendermint Websocket by the client module. +use crate::{ + core::{ics02_client::height::Height, ics24_host::identifier::ClientId}, + events::IbcEvent, + prelude::*, +}; use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; -use tendermint::abci::Event as AbciEvent; - -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics02_client::height::Height; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::{IbcEvent, IbcEventType}; -use crate::prelude::*; +use tendermint::abci::EventAttribute; /// The content of the `key` field for the attribute containing the height. 
const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -24,119 +20,49 @@ const CLIENT_TYPE_ATTRIBUTE_KEY: &str = "client_type"; /// The content of the `key` field for the attribute containing the height. const CONSENSUS_HEIGHT_ATTRIBUTE_KEY: &str = "consensus_height"; -/// The content of the `key` field for the header in update client event. -const HEADER_ATTRIBUTE_KEY: &str = "header"; - -pub fn try_from_tx(event: &AbciEvent) -> Option { - match event.type_str.parse() { - Ok(IbcEventType::CreateClient) => extract_attributes_from_tx(event) - .map(CreateClient) - .map(IbcEvent::CreateClient) - .ok(), - Ok(IbcEventType::UpdateClient) => match extract_attributes_from_tx(event) { - Ok(attributes) => Some(IbcEvent::UpdateClient(UpdateClient { - common: attributes, - header: extract_header_from_tx(event).ok(), - })), - Err(_) => None, - }, - Ok(IbcEventType::ClientMisbehaviour) => extract_attributes_from_tx(event) - .map(ClientMisbehaviour) - .map(IbcEvent::ClientMisbehaviour) - .ok(), - Ok(IbcEventType::UpgradeClient) => extract_attributes_from_tx(event) - .map(UpgradeClient) - .map(IbcEvent::UpgradeClient) - .ok(), - _ => None, - } -} - -fn extract_attributes_from_tx(event: &AbciEvent) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); - match key { - HEIGHT_ATTRIBUTE_KEY => { - attr.height = value - .parse() - .map_err(|e| Error::invalid_string_as_height(value.to_string(), e))? - } - CLIENT_ID_ATTRIBUTE_KEY => { - attr.client_id = value.parse().map_err(Error::invalid_client_identifier)? - } - CLIENT_TYPE_ATTRIBUTE_KEY => { - attr.client_type = value - .parse() - .map_err(|_| Error::unknown_client_type(value.to_string()))? - } - CONSENSUS_HEIGHT_ATTRIBUTE_KEY => { - attr.consensus_height = value - .parse() - .map_err(|e| Error::invalid_string_as_height(value.to_string(), e))? 
- } - _ => {} - } - } - - Ok(attr) -} - -pub fn extract_header_from_tx(event: &AbciEvent) -> Result { - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); - if key == HEADER_ATTRIBUTE_KEY { - return AnyHeader::decode_from_string(value); - } - } - Err(Error::missing_raw_header()) -} - /// NewBlock event signals the committing & execution of a new block. // TODO - find a better place for NewBlock #[derive(Debug, Deserialize, Serialize, Clone, Copy, PartialEq, Eq)] pub struct NewBlock { - pub height: Height, + pub height: Height, } impl NewBlock { - pub fn new(h: Height) -> NewBlock { - NewBlock { height: h } - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn height(&self) -> Height { - self.height - } + pub fn new(h: Height) -> NewBlock { + NewBlock { height: h } + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn height(&self) -> Height { + self.height + } } impl From for IbcEvent { - fn from(v: NewBlock) -> Self { - IbcEvent::NewBlock(v) - } + fn from(v: NewBlock) -> Self { + IbcEvent::NewBlock(v) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub client_id: ClientId, - pub client_type: ClientType, - pub consensus_height: Height, + pub height: Height, + pub client_id: ClientId, + pub client_type: String, + pub consensus_height: Height, } +#[cfg(not(test))] impl Default for Attributes { - fn default() -> Self { - Attributes { - height: Height::default(), - client_id: Default::default(), - client_type: ClientType::Tendermint, - consensus_height: Height::default(), - } - } + fn default() -> Self { + Attributes { + height: Height::default(), + client_id: Default::default(), + client_type: "00-uninitialized".to_owned(), + consensus_height: Height::default(), + } + } } /// Convert attributes to Tendermint ABCI tags @@ -147,36 +73,36 @@ impl Default for 
Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl From for Vec { - fn from(a: Attributes) -> Self { - let height = Tag { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - }; - let client_id = Tag { - key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_id.to_string().parse().unwrap(), - }; - let client_type = Tag { - key: CLIENT_TYPE_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_type.as_str().parse().unwrap(), - }; - let consensus_height = Tag { - key: CONSENSUS_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - }; - vec![height, client_id, client_type, consensus_height] - } +impl From for Vec { + fn from(a: Attributes) -> Self { + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + let client_id = EventAttribute { + key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_id.to_string().parse().unwrap(), + index: false, + }; + let client_type = EventAttribute { + key: CLIENT_TYPE_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_type.to_owned(), + index: false, + }; + let consensus_height = EventAttribute { + key: CONSENSUS_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + vec![height, client_id, client_type, consensus_height] + } } impl core::fmt::Display for Attributes { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "h: {}, cs_h: {}({})", - self.height, self.client_id, self.consensus_height - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "h: {}, cs_h: {}({})", self.height, self.client_id, self.consensus_height) + } } /// 
CreateClient event signals the creation of a new on-chain client (IBC client). @@ -184,115 +110,87 @@ impl core::fmt::Display for Attributes { pub struct CreateClient(pub Attributes); impl CreateClient { - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for CreateClient { - fn from(attrs: Attributes) -> Self { - CreateClient(attrs) - } + fn from(attrs: Attributes) -> Self { + CreateClient(attrs) + } } impl From for IbcEvent { - fn from(v: CreateClient) -> Self { - IbcEvent::CreateClient(v) - } -} - -impl From for AbciEvent { - fn from(v: CreateClient) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::CreateClient.as_str().to_string(), - attributes, - } - } + fn from(v: CreateClient) -> Self { + IbcEvent::CreateClient(v) + } } impl core::fmt::Display for CreateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.0) + } } /// UpdateClient event signals a recent update of an on-chain client (IBC Client). 
+// TODO: use generic header type #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct UpdateClient { - pub common: Attributes, - pub header: Option, + pub common: Attributes, + pub header: Option>, } impl UpdateClient { - pub fn client_id(&self) -> &ClientId { - &self.common.client_id - } - pub fn client_type(&self) -> ClientType { - self.common.client_type - } - - pub fn height(&self) -> Height { - self.common.height - } - - pub fn set_height(&mut self, height: Height) { - self.common.height = height; - } - - pub fn consensus_height(&self) -> Height { - self.common.consensus_height - } + pub fn client_id(&self) -> &ClientId { + &self.common.client_id + } + + pub fn client_type(&self) -> &str { + &self.common.client_type + } + + pub fn height(&self) -> Height { + self.common.height + } + + pub fn set_height(&mut self, height: Height) { + self.common.height = height; + } + + pub fn consensus_height(&self) -> Height { + self.common.consensus_height + } } impl From for UpdateClient { - fn from(attrs: Attributes) -> Self { - UpdateClient { - common: attrs, - header: None, - } - } + fn from(attrs: Attributes) -> Self { + UpdateClient { common: attrs, header: None } + } } impl From for IbcEvent { - fn from(v: UpdateClient) -> Self { - IbcEvent::UpdateClient(v) - } -} - -impl From for AbciEvent { - fn from(v: UpdateClient) -> Self { - let mut attributes = Vec::::from(v.common); - if let Some(h) = v.header { - let header = Tag { - key: HEADER_ATTRIBUTE_KEY.parse().unwrap(), - value: h.encode_to_string().parse().unwrap(), - }; - attributes.push(header); - } - AbciEvent { - type_str: IbcEventType::UpdateClient.as_str().to_string(), - attributes, - } - } + fn from(v: UpdateClient) -> Self { + IbcEvent::UpdateClient(v) + } } impl core::fmt::Display for UpdateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.common) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), 
core::fmt::Error> { + write!(f, "{}", self.common) + } } impl core::fmt::Debug for UpdateClient { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.common) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.common) + } } /// ClientMisbehaviour event signals the update of an on-chain client (IBC Client) with evidence of @@ -301,37 +199,27 @@ impl core::fmt::Debug for UpdateClient { pub struct ClientMisbehaviour(pub Attributes); impl ClientMisbehaviour { - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for ClientMisbehaviour { - fn from(attrs: Attributes) -> Self { - ClientMisbehaviour(attrs) - } + fn from(attrs: Attributes) -> Self { + ClientMisbehaviour(attrs) + } } impl From for IbcEvent { - fn from(v: ClientMisbehaviour) -> Self { - IbcEvent::ClientMisbehaviour(v) - } -} - -impl From for AbciEvent { - fn from(v: ClientMisbehaviour) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::ClientMisbehaviour.as_str().to_string(), - attributes, - } - } + fn from(v: ClientMisbehaviour) -> Self { + IbcEvent::ClientMisbehaviour(v) + } } /// Signals a recent upgrade of an on-chain client (IBC Client). 
@@ -339,74 +227,19 @@ impl From for AbciEvent { pub struct UpgradeClient(pub Attributes); impl UpgradeClient { - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn client_id(&self) -> &ClientId { - &self.0.client_id - } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn client_id(&self) -> &ClientId { + &self.0.client_id + } } impl From for UpgradeClient { - fn from(attrs: Attributes) -> Self { - UpgradeClient(attrs) - } -} - -impl From for AbciEvent { - fn from(v: UpgradeClient) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::UpgradeClient.as_str().to_string(), - attributes, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core::ics02_client::header::Header; - use crate::mock::header::MockHeader; - - #[test] - fn client_event_to_abci_event() { - let height = Height::new(1, 1); - let attributes = Attributes { - height, - client_id: "test_client".parse().unwrap(), - client_type: ClientType::Tendermint, - consensus_height: height, - }; - let mut abci_events = vec![]; - let create_client = CreateClient::from(attributes.clone()); - abci_events.push(AbciEvent::from(create_client.clone())); - let client_misbehaviour = ClientMisbehaviour::from(attributes.clone()); - abci_events.push(AbciEvent::from(client_misbehaviour.clone())); - let upgrade_client = UpgradeClient::from(attributes.clone()); - abci_events.push(AbciEvent::from(upgrade_client.clone())); - let mut update_client = UpdateClient::from(attributes); - let header = MockHeader::new(height).wrap_any(); - update_client.header = Some(header); - abci_events.push(AbciEvent::from(update_client.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::CreateClient(e) => assert_eq!(e.0, create_client.0), - IbcEvent::ClientMisbehaviour(e) 
=> assert_eq!(e.0, client_misbehaviour.0), - IbcEvent::UpgradeClient(e) => assert_eq!(e.0, upgrade_client.0), - IbcEvent::UpdateClient(e) => { - assert_eq!(e.common, update_client.common); - assert_eq!(e.header, update_client.header); - } - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + fn from(attrs: Attributes) -> Self { + UpgradeClient(attrs) + } } diff --git a/modules/src/core/ics02_client/handler.rs b/modules/src/core/ics02_client/handler.rs index 9b2980e917..3ef0e53bcf 100644 --- a/modules/src/core/ics02_client/handler.rs +++ b/modules/src/core/ics02_client/handler.rs @@ -1,32 +1,37 @@ //! This module implements the processing logic for ICS2 (client abstractions and functions) msgs. -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::msgs::ClientMsg; -use crate::handler::HandlerOutput; +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics02_client::{error::Error, msgs::ClientMsg}, + ics26_routing::context::ReaderContext, + }, + handler::HandlerOutput, +}; +use core::fmt::Debug; pub mod create_client; pub mod update_client; pub mod upgrade_client; #[derive(Clone, Debug, PartialEq, Eq)] -pub enum ClientResult { - Create(create_client::Result), - Update(update_client::Result), - Upgrade(upgrade_client::Result), +pub enum ClientResult { + Create(create_client::Result), + Update(update_client::Result), + Upgrade(upgrade_client::Result), } /// General entry point for processing any message related to ICS2 (client functions) protocols. 
-pub fn dispatch(ctx: &Ctx, msg: ClientMsg) -> Result, Error> +pub fn dispatch( + ctx: &Ctx, + msg: ClientMsg, +) -> Result>, Error> where - Ctx: ClientReader, + Ctx: ReaderContext, { - match msg { - ClientMsg::CreateClient(msg) => create_client::process(ctx, msg), - ClientMsg::UpdateClient(msg) => update_client::process(ctx, msg), - ClientMsg::UpgradeClient(msg) => upgrade_client::process(ctx, msg), - _ => { - unimplemented!() - } - } + match msg { + ClientMsg::CreateClient(msg) => create_client::process::<_>(ctx, msg), + ClientMsg::UpdateClient(msg) => update_client::process::<_>(ctx, msg), + ClientMsg::UpgradeClient(msg) => upgrade_client::process::<_>(ctx, msg), + } } diff --git a/modules/src/core/ics02_client/handler/create_client.rs b/modules/src/core/ics02_client/handler/create_client.rs index 6175147694..24e1c0260c 100644 --- a/modules/src/core/ics02_client/handler/create_client.rs +++ b/modules/src/core/ics02_client/handler/create_client.rs @@ -1,294 +1,216 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgCreateAnyClient`. 
-use crate::prelude::*; - -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::height::Height; -use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::timestamp::Timestamp; +use crate::{core::ics26_routing::context::ReaderContext, prelude::*}; +use core::fmt::Debug; + +use crate::core::ics02_client::client_state::{ClientState, ClientType}; + +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics02_client::{ + error::Error, events::Attributes, handler::ClientResult, height::Height, + msgs::create_client::MsgCreateAnyClient, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + timestamp::Timestamp, +}; /// The result following the successful processing of a `MsgCreateAnyClient` message. Preferably /// this data type should be used with a qualified name `create_client::Result` to avoid ambiguity. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_type: ClientType, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub processed_time: Timestamp, - pub processed_height: Height, +pub struct Result { + pub client_id: ClientId, + pub client_type: ClientType, + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub processed_time: Timestamp, + pub processed_height: Height, } -pub fn process( - ctx: &dyn ClientReader, - msg: MsgCreateAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Construct this client's identifier - let id_counter = ctx.client_counter()?; - let client_id = ClientId::new(msg.client_state.client_type(), id_counter).map_err(|e| { - Error::client_identifier_constructor(msg.client_state.client_type(), id_counter, e) - })?; - - output.log(format!( - "success: generated new client identifier: {}", - client_id - )); - - let result = ClientResult::Create(Result { - client_id: client_id.clone(), - client_type: msg.client_state.client_type(), - client_state: msg.client_state.clone(), - consensus_state: msg.consensus_state, - processed_time: ctx.host_timestamp(), - processed_height: ctx.host_height(), - }); - - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::CreateClient(event_attributes.into())); - - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgCreateAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext + Eq + Debug + Clone, +{ + let mut output = HandlerOutput::builder(); + + // Construct this client's identifier + let id_counter = ctx.client_counter()?; + let client_type = msg.client_state.client_type(); + let client_id = ClientId::new(&client_type, id_counter) + .map_err(|e| Error::client_identifier_constructor(client_type.to_owned(), id_counter, e))?; + + output.log(format!("success: 
generated new client identifier: {}", client_id)); + + let result = ClientResult::Create(Result { + client_id: client_id.clone(), + client_type: msg.client_state.client_type(), + client_state: msg.client_state.clone(), + consensus_state: msg.consensus_state, + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + + let event_attributes = Attributes { + client_id, + height: ctx.host_height(), + client_type: msg.client_state.client_type().to_owned(), + consensus_height: msg.client_state.latest_height(), + }; + output.emit(IbcEvent::CreateClient(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::time::Duration; - use test_log::test; - - use crate::clients::ics07_tendermint::client_state::{ - AllowUpdate, ClientState as TendermintClientState, - }; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::client_state::ClientState; - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::handler::{dispatch, ClientResult}; - use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics02_client::trust_threshold::TrustThreshold; - use crate::core::ics23_commitment::specs::ProofSpecs; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::test_utils::get_dummy_account_id; - use crate::Height; - - #[test] - fn test_create_client_ok() { - let ctx = MockContext::default(); - let signer = get_dummy_account_id(); - let height = Height::new(0, 
42); - - let msg = MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(height)).into(), - MockConsensusState::new(MockHeader::new(height)).into(), - signer, - ) - .unwrap(); - - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - let expected_client_id = ClientId::new(ClientType::Mock, 0).unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_result) => { - assert_eq!(create_result.client_type, ClientType::Mock); - assert_eq!(create_result.client_id, expected_client_id); - assert_eq!(create_result.client_state, msg.client_state); - assert_eq!(create_result.consensus_state, msg.consensus_state); - } - _ => { - panic!("unexpected result type: expected ClientResult::CreateResult!"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_create_client_ok_multiple() { - let existing_client_id = ClientId::default(); - let signer = get_dummy_account_id(); - let height = Height::new(0, 80); - - let ctx = MockContext::default().with_client(&existing_client_id, height); - - let create_client_msgs: Vec = vec![ - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - signer.clone(), - ) - .unwrap(), - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 42, - ..height - })) - .into(), - signer.clone(), - ) - .unwrap(), - MsgCreateAnyClient::new( - MockClientState::new(MockHeader::new(Height { - 
revision_height: 50, - ..height - })) - .into(), - MockConsensusState::new(MockHeader::new(Height { - revision_height: 50, - ..height - })) - .into(), - signer, - ) - .unwrap(), - ] - .into_iter() - .collect(); - - // The expected client id that will be generated will be identical to "9999-mock-0" for all - // tests. This is because we're not persisting any client results (which is done via the - // tests for `ics26_routing::dispatch`. - let expected_client_id = ClientId::new(ClientType::Mock, 0).unwrap(); - - for msg in create_client_msgs { - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_res) => { - assert_eq!(create_res.client_type, msg.client_state.client_type()); - assert_eq!(create_res.client_id, expected_client_id); - assert_eq!(create_res.client_state, msg.client_state); - assert_eq!(create_res.consensus_state, msg.consensus_state); - } - _ => { - panic!("expected result of type ClientResult::CreateResult"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - } - - #[test] - fn test_tm_create_client_ok() { - let signer = get_dummy_account_id(); - - let ctx = MockContext::default(); - - let tm_header = get_dummy_tendermint_header(); - - let tm_client_state = TendermintClientState::new( - tm_header.chain_id.clone().into(), - TrustThreshold::ONE_THIRD, - Duration::from_secs(64000), - Duration::from_secs(128000), - Duration::from_millis(3000), - Height::new(0, u64::from(tm_header.height)), - ProofSpecs::default(), - vec!["".to_string()], - AllowUpdate { - after_expiry: false, - after_misbehaviour: false, - }, - ) - .unwrap() - .wrap_any(); - - let msg = 
MsgCreateAnyClient::new( - tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), - signer, - ) - .unwrap(); - - let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, mut events, .. - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - let expected_client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - assert!( - matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - match result { - ClientResult::Create(create_res) => { - assert_eq!(create_res.client_type, ClientType::Tendermint); - assert_eq!(create_res.client_id, expected_client_id); - assert_eq!(create_res.client_state, msg.client_state); - assert_eq!(create_res.consensus_state, msg.consensus_state); - } - _ => { - panic!("expected result of type ClientResult::CreateResult"); - } - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } + use crate::core::ics02_client::client_state::ClientState; + + use crate::{ + core::{ + ics02_client::{ + context::ClientReader, + handler::{dispatch, ClientResult}, + msgs::{create_client::MsgCreateAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + prelude::*, + test_utils::get_dummy_account_id, + Height, + }; + use test_log::test; + + #[test] + fn test_create_client_ok() { + let ctx = MockContext::::default(); + let signer = get_dummy_account_id(); + let height = Height::new(0, 42); + + let msg = MsgCreateAnyClient::new( + MockClientState::new(MockHeader::new(height).into()).into(), + MockConsensusState::new(MockHeader::new(height)).into(), + signer, + ) + .unwrap(); + + let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + + match output 
{ + Ok(HandlerOutput { result, mut events, .. }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + let expected_client_id = ClientId::new(&MockClientState::client_type(), 0).unwrap(); + assert!( + matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + match result { + ClientResult::Create(create_result) => { + assert_eq!(create_result.client_type, MockClientState::client_type()); + assert_eq!(create_result.client_id, expected_client_id); + assert_eq!(create_result.client_state, msg.client_state); + assert_eq!(create_result.consensus_state, msg.consensus_state); + }, + _ => { + panic!("unexpected result type: expected ClientResult::CreateResult!"); + }, + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_create_client_ok_multiple() { + let existing_client_id = ClientId::default(); + let signer = get_dummy_account_id(); + let height = Height::new(0, 80); + + let ctx = MockContext::default().with_client(&existing_client_id, height); + + let create_client_msgs: Vec>> = vec![ + MsgCreateAnyClient::new( + MockClientState::new( + MockHeader::new(Height { revision_height: 42, ..height }).into(), + ) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + signer.clone(), + ) + .unwrap(), + MsgCreateAnyClient::new( + MockClientState::new( + MockHeader::new(Height { revision_height: 42, ..height }).into(), + ) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 42, ..height })) + .into(), + signer.clone(), + ) + .unwrap(), + MsgCreateAnyClient::new( + MockClientState::new( + MockHeader::new(Height { revision_height: 50, ..height }).into(), + ) + .into(), + MockConsensusState::new(MockHeader::new(Height { revision_height: 50, ..height })) + .into(), + signer, + ) + .unwrap(), + ] + .into_iter() + .collect(); + + // The expected client id 
that will be generated will be identical to "9999-mock-0" for all + // tests. This is because we're not persisting any client results (which is done via the + // tests for `ics26_routing::dispatch`. + let expected_client_id = ClientId::new(&MockClientState::client_type(), 0).unwrap(); + + for msg in create_client_msgs { + let output = dispatch(&ctx, ClientMsg::CreateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, .. }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::CreateClient(ref e) if e.client_id() == &expected_client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + match result { + ClientResult::Create(create_res) => { + assert_eq!(create_res.client_type, msg.client_state.client_type()); + assert_eq!(create_res.client_id, expected_client_id); + assert_eq!(create_res.client_state, msg.client_state); + assert_eq!(create_res.consensus_state, msg.consensus_state); + }, + _ => { + panic!("expected result of type ClientResult::CreateResult"); + }, + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + } } diff --git a/modules/src/core/ics02_client/handler/update_client.rs b/modules/src/core/ics02_client/handler/update_client.rs index fd4e7a5fb3..b1c4645903 100644 --- a/modules/src/core/ics02_client/handler/update_client.rs +++ b/modules/src/core/ics02_client/handler/update_client.rs @@ -1,540 +1,283 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpdateAnyClient`. 
- -use tracing::debug; - -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::header::Header; -use crate::core::ics02_client::height::Height; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Timestamp; +use core::fmt::Debug; + +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::ClientState, + error::Error, + events::Attributes, + handler::ClientResult, + height::Height, + msgs::update_client::MsgUpdateAnyClient, + }, + ics24_host::identifier::ClientId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Timestamp, +}; /// The result following the successful processing of a `MsgUpdateAnyClient` message. Preferably /// this data type should be used with a qualified name `update_client::Result` to avoid ambiguity. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub processed_time: Timestamp, - pub processed_height: Height, +pub struct Result { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: Option>, + pub processed_time: Timestamp, + pub processed_height: Height, } -pub fn process( - ctx: &dyn ClientReader, - msg: MsgUpdateAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let MsgUpdateAnyClient { - client_id, - header, - signer: _, - } = msg; - - // Read client type from the host chain store. The client should already exist. - let client_type = ctx.client_type(&client_id)?; - - let client_def = AnyClient::from_client_type(client_type); - - // Read client state from the host chain store. - let client_state = ctx.client_state(&client_id)?; - - if client_state.is_frozen() { - return Err(Error::client_frozen(client_id)); - } - - // Read consensus state from the host chain store. - let latest_consensus_state = ctx - .consensus_state(&client_id, client_state.latest_height()) - .map_err(|_| { - Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) - })?; - - debug!("latest consensus state: {:?}", latest_consensus_state); - - let now = ctx.host_timestamp(); - let duration = now - .duration_since(&latest_consensus_state.timestamp()) - .ok_or_else(|| { - Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) - })?; - - if client_state.expired(duration) { - return Err(Error::header_not_within_trust_period( - latest_consensus_state.timestamp(), - header.timestamp(), - )); - } - - // Use client_state to validate the new header against the latest consensus_state. - // This function will return the new client_state (its latest_height changed) and a - // consensus_state obtained from header. These will be later persisted by the keeper. 
- let (new_client_state, new_consensus_state) = client_def - .check_header_and_update_state(ctx, client_id.clone(), client_state, header) - .map_err(|e| Error::header_verification_failure(e.to_string()))?; - - let result = ClientResult::Update(Result { - client_id: client_id.clone(), - client_state: new_client_state, - consensus_state: new_consensus_state, - processed_time: ctx.host_timestamp(), - processed_height: ctx.host_height(), - }); - - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::UpdateClient(event_attributes.into())); - - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgUpdateAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + let MsgUpdateAnyClient { client_id, client_message, signer: _ } = msg; + + // Read client type from the host chain store. The client should already exist. + let client_type = ctx.client_type(&client_id)?; + + // Read client state from the host chain store. + let client_state = ctx.client_state(&client_id)?; + + let client_def = client_state.client_def(); + + if client_state.is_frozen() { + return Err(Error::client_frozen(client_id)); + } + + // Read consensus state from the host chain store. 
+ let latest_consensus_state = + ctx.consensus_state(&client_id, client_state.latest_height()).map_err(|_| { + Error::consensus_state_not_found(client_id.clone(), client_state.latest_height()) + })?; + + tracing::debug!("latest consensus state: {:?}", latest_consensus_state); + + let now = ctx.host_timestamp(); + let duration = now.duration_since(&latest_consensus_state.timestamp()).ok_or_else(|| { + Error::invalid_consensus_state_timestamp(latest_consensus_state.timestamp(), now) + })?; + + if client_state.expired(duration) { + return Err(Error::header_not_within_trust_period(latest_consensus_state.timestamp(), now)); + } + + client_def + .verify_client_message::( + ctx, + client_id.clone(), + client_state.clone(), + client_message.clone(), + ) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let found_misbehaviour = client_def + .check_for_misbehaviour( + ctx, + client_id.clone(), + client_state.clone(), + client_message.clone(), + ) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + client_type: client_type.to_owned(), + consensus_height: client_state.latest_height(), + }; + + if found_misbehaviour { + let client_state = client_def.update_state_on_misbehaviour(client_state, client_message)?; + let result = ClientResult::Update(Result { + client_id, + client_state, + consensus_state: None, + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + output.emit(IbcEvent::ClientMisbehaviour(event_attributes.into())); + return Ok(output.with_result(result)); + } + // Use client_state to validate the new header against the latest consensus_state. + // This function will return the new client_state (its latest_height changed) and a + // consensus_state obtained from header. These will be later persisted by the keeper. 
+ let (new_client_state, new_consensus_state) = client_def + .update_state(ctx, client_id.clone(), client_state, client_message) + .map_err(|e| Error::header_verification_failure(e.to_string()))?; + + let result = ClientResult::::Update(Result { + client_id, + client_state: new_client_state, + consensus_state: Some(new_consensus_state), + processed_time: ctx.host_timestamp(), + processed_height: ctx.host_height(), + }); + + output.emit(IbcEvent::UpdateClient(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use core::str::FromStr; - use test_log::test; - - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - use crate::core::ics02_client::handler::dispatch; - use crate::core::ics02_client::handler::ClientResult::Update; - use crate::core::ics02_client::header::{AnyHeader, Header}; - use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::MockClientState; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::test_utils::get_dummy_account_id; - use crate::timestamp::Timestamp; - use crate::Height; - - #[test] - fn test_update_client_ok() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let timestamp = Timestamp::now(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(Height::new(0, 
46)) - .with_timestamp(timestamp) - .into(), - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert_eq!( - upd_res.client_state, - AnyClientState::Mock(MockClientState::new( - MockHeader::new(msg.header.height()).with_timestamp(timestamp) - )) - ) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_nonexisting_client() { - let client_id = ClientId::from_str("mockclient1").unwrap(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpdateAnyClient { - client_id: ClientId::from_str("nonexistingclient").unwrap(), - header: MockHeader::new(Height::new(0, 46)).into(), - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Err(Error(ErrorDetail::ClientNotFound(e), _)) => { - assert_eq!(e.client_id, msg.client_id); - } - _ => { - panic!("expected ClientNotFound error, instead got {:?}", output) - } - } - } - - #[test] - fn test_update_client_ok_multiple() { - let client_ids = vec![ - ClientId::from_str("mockclient1").unwrap(), - ClientId::from_str("mockclient2").unwrap(), - ClientId::from_str("mockclient3").unwrap(), - ]; - let signer = get_dummy_account_id(); - let initial_height = Height::new(0, 45); - let update_height = Height::new(0, 49); - - let mut ctx = MockContext::default(); - - for cid in &client_ids { - ctx = ctx.with_client(cid, initial_height); - 
} - - for cid in &client_ids { - let msg = MsgUpdateAnyClient { - client_id: cid.clone(), - header: MockHeader::new(update_height).into(), - signer: signer.clone(), - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result: _, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_adjacent_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - let update_height = Height::new(1, 21); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - Height::new(1, 1), - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - update_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(update_height); - let mut latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - latest_header = match latest_header { - AnyHeader::Tendermint(mut theader) => { - theader.trusted_height = client_height; - AnyHeader::Tendermint(theader) - } - AnyHeader::Mock(m) => AnyHeader::Mock(m), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_non_adjacent_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - let update_height = Height::new(1, 21); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - Height::new(1, 1), - ) - .with_client_parametrized_history( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - update_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(update_height); - let mut latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - let trusted_height = client_height.clone().sub(1).unwrap_or_default(); - - latest_header = match latest_header { - AnyHeader::Tendermint(mut theader) => { - theader.trusted_height = trusted_height; - AnyHeader::Tendermint(theader) - } - AnyHeader::Mock(m) => AnyHeader::Mock(m), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_duplicate_ok() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - - let chain_start_height = Height::new(1, 11); - - let ctx = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_start_height, - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - client_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(client_height); - let latest_header: AnyHeader = match block_ref.cloned().map(Into::into).unwrap() { - AnyHeader::Tendermint(mut theader) => { - let cons_state = ctx.latest_consensus_states(&client_id, &client_height); - if let AnyConsensusState::Tendermint(tcs) = cons_state { - theader.signed_header.header.time = tcs.timestamp; - theader.trusted_height = Height::new(1, 11) - } - AnyHeader::Tendermint(theader) - } - AnyHeader::Mock(header) => AnyHeader::Mock(header), - }; - - let msg = MsgUpdateAnyClient { - client_id: client_id.clone(), - header: latest_header, - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Update(upd_res) => { - assert_eq!(upd_res.client_id, client_id); - assert!(!upd_res.client_state.is_frozen()); - assert_eq!(upd_res.client_state, ctx.latest_client_states(&client_id)); - assert_eq!(upd_res.client_state.latest_height(), msg.header.height(),) - } - _ => panic!("update handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {:?}", err); - } - } - } - - #[test] - fn test_update_synthetic_tendermint_client_lower_height() { - let client_id = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_height = Height::new(1, 20); - - let client_update_height = Height::new(1, 19); - - let chain_start_height = Height::new(1, 11); - - let ctx = MockContext::new( - 
ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_start_height, - ) - .with_client_parametrized( - &client_id, - client_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. - Some(client_height), - ); - - let ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - client_height, - ); - - let signer = get_dummy_account_id(); - - let block_ref = ctx_b.host_block(client_update_height); - let latest_header: AnyHeader = block_ref.cloned().map(Into::into).unwrap(); - - let msg = MsgUpdateAnyClient { - client_id, - header: latest_header, - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpdateClient(msg)); - - match output { - Ok(_) => { - panic!("update handler result has incorrect type"); - } - Err(err) => match err.detail() { - ErrorDetail::HeaderVerificationFailure(_) => {} - _ => panic!("unexpected error: {:?}", err), - }, - } - } + use core::str::FromStr; + use test_log::test; + + use crate::mock::header::AnyClientMessage; + use crate::{ + core::{ + ics02_client::{ + context::ClientReader, + error::{Error, ErrorDetail}, + handler::{dispatch, ClientResult::Update}, + msgs::{update_client::MsgUpdateAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{AnyClientState, MockClientState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + prelude::*, + test_utils::get_dummy_account_id, + timestamp::Timestamp, + Height, + }; + + #[test] + fn test_update_client_ok() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let timestamp = Timestamp::now(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + let msg = MsgUpdateAnyClient { + client_id: client_id.clone(), + client_message: MockHeader::new(Height::new(0, 46)).with_timestamp(timestamp).into(), + signer, + }; + + let output = 
dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + // Check the result + match result { + Update(upd_res) => { + assert_eq!(upd_res.client_id, client_id); + assert_eq!( + upd_res.client_state, + AnyClientState::Mock(MockClientState::new( + MockHeader::new(match msg.client_message { + AnyClientMessage::Mock(client_msg) => { + client_msg.height() + }, + }) + .with_timestamp(timestamp) + .into() + )) + ) + }, + _ => panic!("update handler result has incorrect type"), + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_update_nonexisting_client() { + let client_id = ClientId::from_str("mockclient1").unwrap(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpdateAnyClient { + client_id: ClientId::from_str("nonexistingclient").unwrap(), + client_message: MockHeader::new(Height::new(0, 46)).into(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::ClientNotFound(e), _)) => { + assert_eq!(e.client_id, msg.client_id); + }, + _ => { + panic!("expected ClientNotFound error, instead got {:?}", output) + }, + } + } + + #[test] + fn test_update_client_ok_multiple() { + let client_ids = vec![ + ClientId::from_str("mockclient1").unwrap(), + ClientId::from_str("mockclient2").unwrap(), + ClientId::from_str("mockclient3").unwrap(), + ]; + let signer = get_dummy_account_id(); + let initial_height = Height::new(0, 45); + let update_height = Height::new(0, 49); + + let mut ctx = MockContext::::default(); + + for cid in &client_ids { + ctx = 
ctx.with_client(cid, initial_height); + } + + for cid in &client_ids { + let msg = MsgUpdateAnyClient { + client_id: cid.clone(), + client_message: MockHeader::new(update_height).into(), + signer: signer.clone(), + }; + + let output = dispatch(&ctx, ClientMsg::UpdateClient(msg.clone())); + + match output { + Ok(HandlerOutput { result: _, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpdateClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + } } diff --git a/modules/src/core/ics02_client/handler/upgrade_client.rs b/modules/src/core/ics02_client/handler/upgrade_client.rs index 54e2d5caa6..265268598a 100644 --- a/modules/src/core/ics02_client/handler/upgrade_client.rs +++ b/modules/src/core/ics02_client/handler/upgrade_client.rs @@ -1,200 +1,220 @@ //! Protocol logic specific to processing ICS2 messages of type `MsgUpgradeAnyClient`. -//! 
-use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::{AnyClient, ClientDef}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::events::Attributes; -use crate::core::ics02_client::handler::ClientResult; -use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; -use crate::core::ics24_host::identifier::ClientId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; + +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics02_client::{ + client_def::{ClientDef, ConsensusUpdateResult}, + client_state::ClientState, + error::Error, + events::Attributes, + handler::ClientResult, + msgs::upgrade_client::MsgUpgradeAnyClient, + }, + ics24_host::identifier::ClientId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; +use core::fmt::Debug; /// The result following the successful processing of a `MsgUpgradeAnyClient` message. /// This data type should be used with a qualified name `upgrade_client::Result` to avoid ambiguity. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct Result { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, +pub struct Result { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: Option>, } -pub fn process( - ctx: &dyn ClientReader, - msg: MsgUpgradeAnyClient, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - let MsgUpgradeAnyClient { client_id, .. } = msg; - - // Read client state from the host chain store. 
- let client_state = ctx.client_state(&client_id)?; - - if client_state.is_frozen() { - return Err(Error::client_frozen(client_id)); - } - - let upgrade_client_state = msg.client_state.clone(); - - if client_state.latest_height() >= upgrade_client_state.latest_height() { - return Err(Error::low_upgrade_height( - client_state.latest_height(), - upgrade_client_state.latest_height(), - )); - } - - let client_type = ctx.client_type(&client_id)?; - - let client_def = AnyClient::from_client_type(client_type); - - let (new_client_state, new_consensus_state) = client_def.verify_upgrade_and_update_state( - &upgrade_client_state, - &msg.consensus_state, - msg.proof_upgrade_client.clone(), - msg.proof_upgrade_consensus_state, - )?; - - // Not implemented yet: https://github.com/informalsystems/ibc-rs/issues/722 - // todo!() - - let result = ClientResult::Upgrade(Result { - client_id: client_id.clone(), - client_state: new_client_state, - consensus_state: new_consensus_state, - }); - let event_attributes = Attributes { - client_id, - height: ctx.host_height(), - ..Default::default() - }; - - output.emit(IbcEvent::UpgradeClient(event_attributes.into())); - Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: MsgUpgradeAnyClient, +) -> HandlerResult, Error> +where + Ctx: ReaderContext + Eq + Debug + Clone, +{ + let mut output = HandlerOutput::builder(); + let MsgUpgradeAnyClient { client_id, .. } = msg; + + // Read client state from the host chain store. 
+ let client_state = ctx.client_state(&client_id)?; + + if client_state.is_frozen() { + return Err(Error::client_frozen(client_id)); + } + + let upgrade_client_state = msg.client_state.clone(); + + if client_state.latest_height() >= upgrade_client_state.latest_height() { + return Err(Error::low_upgrade_height( + client_state.latest_height(), + upgrade_client_state.latest_height(), + )); + } + + let client_type = ctx.client_type(&client_id)?; + + let client_def = client_state.client_def(); + + let (new_client_state, new_consensus_state) = client_def + .verify_upgrade_and_update_state::( + &ctx, + client_id.clone(), + &client_state, + &upgrade_client_state, + &msg.consensus_state, + msg.proof_upgrade_client.clone(), + msg.proof_upgrade_consensus_state, + )?; + + let event_attributes = Attributes { + client_id: client_id.clone(), + height: ctx.host_height(), + client_type: client_type.to_owned(), + consensus_height: new_client_state.latest_height(), + }; + + let result = ClientResult::Upgrade(Result { + client_id, + client_state: new_client_state, + consensus_state: Some(new_consensus_state), + }); + + output.emit(IbcEvent::UpgradeClient(event_attributes.into())); + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - - use crate::core::ics02_client::context::ClientReader; - use crate::core::ics02_client::error::{Error, ErrorDetail}; - use crate::core::ics02_client::handler::dispatch; - use crate::core::ics02_client::handler::ClientResult::Upgrade; - use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; - use crate::core::ics02_client::msgs::ClientMsg; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::handler::HandlerOutput; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::MockContext; - use crate::mock::header::MockHeader; - use crate::test_utils::get_dummy_account_id; - use crate::Height; - 
- #[test] - fn test_upgrade_client_ok() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpgradeAnyClient { - client_id: client_id.clone(), - client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - Ok(HandlerOutput { - result, - mut events, - log, - }) => { - assert_eq!(events.len(), 1); - let event = events.pop().unwrap(); - assert!( - matches!(event, IbcEvent::UpgradeClient(ref e) if e.client_id() == &msg.client_id) - ); - assert_eq!(event.height(), ctx.host_height()); - assert!(log.is_empty()); - // Check the result - match result { - Upgrade(upg_res) => { - assert_eq!(upg_res.client_id, client_id); - assert_eq!(upg_res.client_state, msg.client_state) - } - _ => panic!("upgrade handler result has incorrect type"), - } - } - Err(err) => { - panic!("unexpected error: {}", err); - } - } - } - - #[test] - fn test_upgrade_nonexisting_client() { - let client_id = ClientId::from_str("mockclient1").unwrap(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpgradeAnyClient { - client_id: ClientId::from_str("nonexistingclient").unwrap(), - client_state: MockClientState::new(MockHeader::new(Height::new(1, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - 
Err(Error(ErrorDetail::ClientNotFound(e), _)) => { - assert_eq!(e.client_id, msg.client_id); - } - _ => { - panic!("expected ClientNotFound error, instead got {:?}", output); - } - } - } - - #[test] - fn test_upgrade_client_low_height() { - let client_id = ClientId::default(); - let signer = get_dummy_account_id(); - - let ctx = MockContext::default().with_client(&client_id, Height::new(0, 42)); - - let msg = MsgUpgradeAnyClient { - client_id, - client_state: MockClientState::new(MockHeader::new(Height::new(0, 26))).into(), - consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), - proof_upgrade_client: Default::default(), - proof_upgrade_consensus_state: Default::default(), - signer, - }; - - let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); - - match output { - Err(Error(ErrorDetail::LowUpgradeHeight(e), _)) => { - assert_eq!(e.upgraded_height, Height::new(0, 42)); - assert_eq!(e.client_height, msg.client_state.latest_height()); - } - _ => { - panic!("expected LowUpgradeHeight error, instead got {:?}", output); - } - } - } + use crate::prelude::*; + + use core::str::FromStr; + + use crate::{ + core::{ + ics02_client::{ + client_state::ClientState, + context::ClientReader, + error::{Error, ErrorDetail}, + handler::{dispatch, ClientResult::Upgrade}, + msgs::{upgrade_client::MsgUpgradeAnyClient, ClientMsg}, + }, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + handler::HandlerOutput, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::get_dummy_account_id, + Height, + }; + + #[test] + fn test_upgrade_client_ok() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id: client_id.clone(), + client_state: MockClientState::new(MockHeader::new(Height::new(1, 
26)).into()).into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Ok(HandlerOutput { result, mut events, log }) => { + assert_eq!(events.len(), 1); + let event = events.pop().unwrap(); + assert!( + matches!(event, IbcEvent::UpgradeClient(ref e) if e.client_id() == &msg.client_id) + ); + assert_eq!(event.height(), ctx.host_height()); + assert!(log.is_empty()); + // Check the result + match result { + Upgrade(upg_res) => { + assert_eq!(upg_res.client_id, client_id); + assert_eq!(upg_res.client_state, msg.client_state) + }, + _ => panic!("upgrade handler result has incorrect type"), + } + }, + Err(err) => { + panic!("unexpected error: {}", err); + }, + } + } + + #[test] + fn test_upgrade_nonexisting_client() { + let client_id = ClientId::from_str("mockclient1").unwrap(); + let signer = get_dummy_account_id(); + + let ctx = + MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id: ClientId::from_str("nonexistingclient").unwrap(), + client_state: MockClientState::new(MockHeader::new(Height::new(1, 26).into()).into()) + .into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(1, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::ClientNotFound(e), _)) => { + assert_eq!(e.client_id, msg.client_id); + }, + _ => { + panic!("expected ClientNotFound error, instead got {:?}", output); + }, + } + } + + #[test] + fn test_upgrade_client_low_height() { + let client_id = ClientId::default(); + let signer = get_dummy_account_id(); + + let ctx = + 
MockContext::::default().with_client(&client_id, Height::new(0, 42)); + + let msg = MsgUpgradeAnyClient { + client_id, + client_state: MockClientState::new(MockHeader::new(Height::new(0, 26)).into()).into(), + consensus_state: MockConsensusState::new(MockHeader::new(Height::new(0, 26))).into(), + proof_upgrade_client: Default::default(), + proof_upgrade_consensus_state: Default::default(), + signer, + }; + + let output = dispatch(&ctx, ClientMsg::UpgradeClient(msg.clone())); + + match output { + Err(Error(ErrorDetail::LowUpgradeHeight(e), _)) => { + assert_eq!(e.upgraded_height, Height::new(0, 42)); + assert_eq!(e.client_height, msg.client_state.latest_height()); + }, + _ => { + panic!("expected LowUpgradeHeight error, instead got {:?}", output); + }, + } + } } diff --git a/modules/src/core/ics02_client/header.rs b/modules/src/core/ics02_client/header.rs deleted file mode 100644 index cd2de5e9d7..0000000000 --- a/modules/src/core/ics02_client/header.rs +++ /dev/null @@ -1,130 +0,0 @@ -use core::ops::Deref; - -use ibc_proto::google::protobuf::Any; -use serde_derive::{Deserialize, Serialize}; -use subtle_encoding::hex; -use tendermint_proto::Protobuf; - -use crate::clients::ics07_tendermint::header::{decode_header, Header as TendermintHeader}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -#[cfg(any(test, feature = "mocks"))] -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; - -pub const TENDERMINT_HEADER_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Header"; -pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; - -/// Abstract of consensus state update information -pub trait Header: Clone + core::fmt::Debug + Send + Sync { - /// The type of client (eg. 
Tendermint) - fn client_type(&self) -> ClientType; - - /// The height of the consensus state - fn height(&self) -> Height; - - /// The timestamp of the consensus state - fn timestamp(&self) -> Timestamp; - - /// Wrap into an `AnyHeader` - fn wrap_any(self) -> AnyHeader; -} - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -#[allow(clippy::large_enum_variant)] -pub enum AnyHeader { - Tendermint(TendermintHeader), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockHeader), -} - -impl Header for AnyHeader { - fn client_type(&self) -> ClientType { - match self { - Self::Tendermint(header) => header.client_type(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.client_type(), - } - } - - fn height(&self) -> Height { - match self { - Self::Tendermint(header) => header.height(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.height(), - } - } - - fn timestamp(&self) -> Timestamp { - match self { - Self::Tendermint(header) => header.timestamp(), - #[cfg(any(test, feature = "mocks"))] - Self::Mock(header) => header.timestamp(), - } - } - - fn wrap_any(self) -> AnyHeader { - self - } -} - -impl AnyHeader { - pub fn encode_to_string(&self) -> String { - let buf = Protobuf::encode_vec(self).expect("encoding shouldn't fail"); - let encoded = hex::encode(buf); - String::from_utf8(encoded).expect("hex-encoded string should always be valid UTF-8") - } - - pub fn decode_from_string(s: &str) -> Result { - let header_bytes = hex::decode(s).unwrap(); - Protobuf::decode(header_bytes.as_ref()).map_err(Error::invalid_raw_header) - } -} - -impl Protobuf for AnyHeader {} - -impl TryFrom for AnyHeader { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TENDERMINT_HEADER_TYPE_URL => { - let val = decode_header(raw.value.deref()).map_err(Error::tendermint)?; - - Ok(AnyHeader::Tendermint(val)) - } - - #[cfg(any(test, feature = "mocks"))] - MOCK_HEADER_TYPE_URL => Ok(AnyHeader::Mock( - 
MockHeader::decode_vec(&raw.value).map_err(Error::invalid_raw_header)?, - )), - - _ => Err(Error::unknown_header_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyHeader) -> Self { - match value { - AnyHeader::Tendermint(header) => Any { - type_url: TENDERMINT_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encoding to `Any` from `AnyHeader::Tendermint`"), - }, - #[cfg(any(test, feature = "mocks"))] - AnyHeader::Mock(header) => Any { - type_url: MOCK_HEADER_TYPE_URL.to_string(), - value: header - .encode_vec() - .expect("encoding to `Any` from `AnyHeader::Mock`"), - }, - } - } -} diff --git a/modules/src/core/ics02_client/height.rs b/modules/src/core/ics02_client/height.rs index e7feae2028..6b6794db68 100644 --- a/modules/src/core/ics02_client/height.rs +++ b/modules/src/core/ics02_client/height.rs @@ -1,8 +1,7 @@ use crate::prelude::*; use core::cmp::Ordering; -use core::num::ParseIntError; -use core::str::FromStr; +use core::{num::ParseIntError, str::FromStr}; use flex_error::{define_error, TraceError}; use serde_derive::{Deserialize, Serialize}; @@ -14,169 +13,157 @@ use crate::core::ics02_client::error::Error; #[derive(Copy, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Height { - /// Previously known as "epoch" - pub revision_number: u64, + /// Previously known as "epoch" + pub revision_number: u64, - /// The height of a block - pub revision_height: u64, + /// The height of a block + pub revision_height: u64, } impl Height { - pub fn new(revision_number: u64, revision_height: u64) -> Self { - Self { - revision_number, - revision_height, - } - } - - pub fn zero() -> Height { - Self { - revision_number: 0, - revision_height: 0, - } - } - - pub fn is_zero(&self) -> bool { - self.revision_height == 0 - } - - pub fn add(&self, delta: u64) -> Height { - Height { - revision_number: self.revision_number, - revision_height: self.revision_height + delta, - } - } - - pub fn increment(&self) -> Height { - 
self.add(1) - } - - pub fn sub(&self, delta: u64) -> Result { - if self.revision_height <= delta { - return Err(Error::invalid_height_result()); - } - - Ok(Height { - revision_number: self.revision_number, - revision_height: self.revision_height - delta, - }) - } - - pub fn decrement(&self) -> Result { - self.sub(1) - } - - pub fn with_revision_height(self, revision_height: u64) -> Height { - Height { - revision_height, - ..self - } - } + pub fn new(revision_number: u64, revision_height: u64) -> Self { + Self { revision_number, revision_height } + } + + pub fn zero() -> Height { + Self { revision_number: 0, revision_height: 0 } + } + + pub fn is_zero(&self) -> bool { + self.revision_height == 0 + } + + pub fn add(&self, delta: u64) -> Height { + Height { + revision_number: self.revision_number, + revision_height: self.revision_height + delta, + } + } + + pub fn increment(&self) -> Height { + self.add(1) + } + + pub fn sub(&self, delta: u64) -> Result { + if self.revision_height <= delta { + return Err(Error::invalid_height_result()) + } + + Ok(Height { + revision_number: self.revision_number, + revision_height: self.revision_height - delta, + }) + } + + pub fn decrement(&self) -> Result { + self.sub(1) + } + + pub fn with_revision_height(self, revision_height: u64) -> Height { + Height { revision_height, ..self } + } } impl Default for Height { - fn default() -> Self { - Self::zero() - } + fn default() -> Self { + Self::zero() + } } impl PartialOrd for Height { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl Ord for Height { - fn cmp(&self, other: &Self) -> Ordering { - if self.revision_number < other.revision_number { - Ordering::Less - } else if self.revision_number > other.revision_number { - Ordering::Greater - } else if self.revision_height < other.revision_height { - Ordering::Less - } else if self.revision_height > other.revision_height { - 
Ordering::Greater - } else { - Ordering::Equal - } - } + fn cmp(&self, other: &Self) -> Ordering { + if self.revision_number < other.revision_number { + Ordering::Less + } else if self.revision_number > other.revision_number { + Ordering::Greater + } else if self.revision_height < other.revision_height { + Ordering::Less + } else if self.revision_height > other.revision_height { + Ordering::Greater + } else { + Ordering::Equal + } + } } impl Protobuf for Height {} impl From for Height { - fn from(raw: RawHeight) -> Self { - Height { - revision_number: raw.revision_number, - revision_height: raw.revision_height, - } - } + fn from(raw: RawHeight) -> Self { + Height { revision_number: raw.revision_number, revision_height: raw.revision_height } + } } impl From for RawHeight { - fn from(ics_height: Height) -> Self { - RawHeight { - revision_number: ics_height.revision_number, - revision_height: ics_height.revision_height, - } - } + fn from(ics_height: Height) -> Self { + RawHeight { + revision_number: ics_height.revision_number, + revision_height: ics_height.revision_height, + } + } } impl core::fmt::Debug for Height { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - f.debug_struct("Height") - .field("revision", &self.revision_number) - .field("height", &self.revision_height) - .finish() - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + f.debug_struct("Height") + .field("revision", &self.revision_number) + .field("height", &self.revision_height) + .finish() + } } /// Custom debug output to omit the packet data impl core::fmt::Display for Height { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}-{}", self.revision_number, self.revision_height) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}-{}", self.revision_number, self.revision_height) + } } define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - HeightError { - HeightConversion - { height: String } - [ TraceError ] - | e | { - format_args!("cannot convert into a `Height` type from string {0}", - e.height) - }, - } + #[derive(Debug, PartialEq, Eq)] + HeightError { + HeightConversion + { height: String } + [ TraceError ] + | e | { + format_args!("cannot convert into a `Height` type from string {0}", + e.height) + }, + } } impl TryFrom<&str> for Height { - type Error = HeightError; - - fn try_from(value: &str) -> Result { - let split: Vec<&str> = value.split('-').collect(); - Ok(Height { - revision_number: split[0] - .parse::() - .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, - revision_height: split[1] - .parse::() - .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, - }) - } + type Error = HeightError; + + fn try_from(value: &str) -> Result { + let split: Vec<&str> = value.split('-').collect(); + Ok(Height { + revision_number: split[0] + .parse::() + .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, + revision_height: split[1] + .parse::() + .map_err(|e| HeightError::height_conversion(value.to_owned(), e))?, + }) + } } impl From for String { - fn from(height: Height) -> Self { - format!("{}-{}", height.revision_number, height.revision_number) - } + fn from(height: Height) -> Self { + format!("{}-{}", height.revision_number, height.revision_number) + } } impl FromStr for Height { - type Err = HeightError; + type Err = HeightError; - fn from_str(s: &str) -> Result { - Height::try_from(s) - } + fn from_str(s: &str) -> Result { + Height::try_from(s) + } } diff --git a/modules/src/core/ics02_client/misbehaviour.rs b/modules/src/core/ics02_client/misbehaviour.rs deleted file mode 100644 index 51ba8f1144..0000000000 --- a/modules/src/core/ics02_client/misbehaviour.rs +++ /dev/null @@ -1,121 +0,0 @@ -use crate::prelude::*; - -use ibc_proto::google::protobuf::Any; -use tendermint_proto::Protobuf; - -use 
crate::clients::ics07_tendermint::misbehaviour::Misbehaviour as TmMisbehaviour; -use crate::core::ics02_client::error::Error; - -#[cfg(any(test, feature = "mocks"))] -use crate::mock::misbehaviour::Misbehaviour as MockMisbehaviour; - -use crate::core::ics24_host::identifier::ClientId; -use crate::Height; - -use super::header::AnyHeader; - -pub const TENDERMINT_MISBEHAVIOR_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.Misbehaviour"; - -#[cfg(any(test, feature = "mocks"))] -pub const MOCK_MISBEHAVIOUR_TYPE_URL: &str = "/ibc.mock.Misbehavior"; - -pub trait Misbehaviour: Clone + core::fmt::Debug + Send + Sync { - /// The type of client (eg. Tendermint) - fn client_id(&self) -> &ClientId; - - /// The height of the consensus state - fn height(&self) -> Height; - - fn wrap_any(self) -> AnyMisbehaviour; -} - -#[derive(Clone, Debug, PartialEq)] // TODO: Add Eq bound once possible -#[allow(clippy::large_enum_variant)] -pub enum AnyMisbehaviour { - Tendermint(TmMisbehaviour), - - #[cfg(any(test, feature = "mocks"))] - Mock(MockMisbehaviour), -} - -impl Misbehaviour for AnyMisbehaviour { - fn client_id(&self) -> &ClientId { - match self { - Self::Tendermint(misbehaviour) => misbehaviour.client_id(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(misbehaviour) => misbehaviour.client_id(), - } - } - - fn height(&self) -> Height { - match self { - Self::Tendermint(misbehaviour) => misbehaviour.height(), - - #[cfg(any(test, feature = "mocks"))] - Self::Mock(misbehaviour) => misbehaviour.height(), - } - } - - fn wrap_any(self) -> AnyMisbehaviour { - self - } -} - -impl Protobuf for AnyMisbehaviour {} - -impl TryFrom for AnyMisbehaviour { - type Error = Error; - - fn try_from(raw: Any) -> Result { - match raw.type_url.as_str() { - TENDERMINT_MISBEHAVIOR_TYPE_URL => Ok(AnyMisbehaviour::Tendermint( - TmMisbehaviour::decode_vec(&raw.value).map_err(Error::decode_raw_misbehaviour)?, - )), - - #[cfg(any(test, feature = "mocks"))] - MOCK_MISBEHAVIOUR_TYPE_URL => 
Ok(AnyMisbehaviour::Mock( - MockMisbehaviour::decode_vec(&raw.value).map_err(Error::decode_raw_misbehaviour)?, - )), - _ => Err(Error::unknown_misbehaviour_type(raw.type_url)), - } - } -} - -impl From for Any { - fn from(value: AnyMisbehaviour) -> Self { - match value { - AnyMisbehaviour::Tendermint(misbehaviour) => Any { - type_url: TENDERMINT_MISBEHAVIOR_TYPE_URL.to_string(), - value: misbehaviour - .encode_vec() - .expect("encoding to `Any` from `AnyMisbehavior::Tendermint`"), - }, - - #[cfg(any(test, feature = "mocks"))] - AnyMisbehaviour::Mock(misbehaviour) => Any { - type_url: MOCK_MISBEHAVIOUR_TYPE_URL.to_string(), - value: misbehaviour - .encode_vec() - .expect("encoding to `Any` from `AnyMisbehavior::Mock`"), - }, - } - } -} - -impl core::fmt::Display for AnyMisbehaviour { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - match self { - AnyMisbehaviour::Tendermint(tm) => write!(f, "{}", tm), - - #[cfg(any(test, feature = "mocks"))] - AnyMisbehaviour::Mock(mock) => write!(f, "{:?}", mock), - } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct MisbehaviourEvidence { - pub misbehaviour: AnyMisbehaviour, - pub supporting_headers: Vec, -} diff --git a/modules/src/core/ics02_client/mod.rs b/modules/src/core/ics02_client/mod.rs index 9c25ef479f..18dc4010c5 100644 --- a/modules/src/core/ics02_client/mod.rs +++ b/modules/src/core/ics02_client/mod.rs @@ -2,14 +2,13 @@ pub mod client_consensus; pub mod client_def; +pub mod client_message; pub mod client_state; pub mod client_type; pub mod context; pub mod error; pub mod events; pub mod handler; -pub mod header; pub mod height; -pub mod misbehaviour; pub mod msgs; pub mod trust_threshold; diff --git a/modules/src/core/ics02_client/msgs.rs b/modules/src/core/ics02_client/msgs.rs index 3131be9beb..ecdac55486 100644 --- a/modules/src/core/ics02_client/msgs.rs +++ b/modules/src/core/ics02_client/msgs.rs @@ -4,21 +4,20 @@ //! 
subsequently calls into the chain-specific (e.g., ICS 07) client handler. See: //! . -use crate::core::ics02_client::msgs::create_client::MsgCreateAnyClient; -use crate::core::ics02_client::msgs::misbehavior::MsgSubmitAnyMisbehaviour; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics02_client::msgs::upgrade_client::MsgUpgradeAnyClient; +use crate::core::ics02_client::context::ClientTypes; +use crate::core::ics02_client::msgs::{ + create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, + upgrade_client::MsgUpgradeAnyClient, +}; pub mod create_client; -pub mod misbehavior; pub mod update_client; pub mod upgrade_client; #[allow(clippy::large_enum_variant)] #[derive(Clone, Debug)] -pub enum ClientMsg { - CreateClient(MsgCreateAnyClient), - UpdateClient(MsgUpdateAnyClient), - Misbehaviour(MsgSubmitAnyMisbehaviour), - UpgradeClient(MsgUpgradeAnyClient), +pub enum ClientMsg { + CreateClient(MsgCreateAnyClient), + UpdateClient(MsgUpdateAnyClient), + UpgradeClient(MsgUpgradeAnyClient), } diff --git a/modules/src/core/ics02_client/msgs/create_client.rs b/modules/src/core/ics02_client/msgs/create_client.rs index 469bdc27e0..b3ce322c2c 100644 --- a/modules/src/core/ics02_client/msgs/create_client.rs +++ b/modules/src/core/ics02_client/msgs/create_client.rs @@ -1,123 +1,107 @@ //! Definition of domain type message `MsgCreateAnyClient`. 
use crate::prelude::*; +use core::fmt::Display; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgCreateClient as RawMsgCreateClient; +use ibc_proto::ibc::core::client::v1::{MsgCreateClient as RawMsgCreateClient, MsgCreateClient}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::core::ics02_client::context::ClientTypes; +use crate::{core::ics02_client::error::Error, signer::Signer, tx_msg::Msg}; pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgCreateClient"; /// A type of message that triggers the creation of a new on-chain (IBC) client. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgCreateAnyClient { - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub signer: Signer, +pub struct MsgCreateAnyClient { + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub signer: Signer, } -impl MsgCreateAnyClient { - pub fn new( - client_state: AnyClientState, - consensus_state: AnyConsensusState, - signer: Signer, - ) -> Result { - if client_state.client_type() != consensus_state.client_type() { - return Err(Error::raw_client_and_consensus_state_types_mismatch( - client_state.client_type(), - consensus_state.client_type(), - )); - } - Ok(MsgCreateAnyClient { - client_state, - consensus_state, - signer, - }) - } +impl MsgCreateAnyClient { + pub fn new( + client_state: C::AnyClientState, + consensus_state: C::AnyConsensusState, + signer: Signer, + ) -> Result { + // if client_state.client_type() != consensus_state.client_type() { + // return Err(Error::raw_client_and_consensus_state_types_mismatch( + // client_state.client_type(), + // consensus_state.client_type(), + // )); + // } + + Ok(MsgCreateAnyClient { client_state, consensus_state, signer }) + } } 
-impl Msg for MsgCreateAnyClient { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgCreateClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgCreateAnyClient +where + C: ClientTypes + Clone, + Any: From, + Any: From, +{ + type ValidationError = crate::core::ics24_host::error::ValidationError; + type Raw = RawMsgCreateClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgCreateAnyClient {} - -impl TryFrom for MsgCreateAnyClient { - type Error = Error; - - fn try_from(raw: RawMsgCreateClient) -> Result { - let raw_client_state = raw - .client_state - .ok_or_else(Error::missing_raw_client_state)?; - - let raw_consensus_state = raw - .consensus_state - .ok_or_else(Error::missing_raw_client_state)?; - - MsgCreateAnyClient::new( - AnyClientState::try_from(raw_client_state)?, - AnyConsensusState::try_from(raw_consensus_state)?, - raw.signer.parse().map_err(Error::signer)?, - ) - } +impl Protobuf for MsgCreateAnyClient +where + C: ClientTypes + Clone, + Any: From, + Any: From, + MsgCreateAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ } -impl From for RawMsgCreateClient { - fn from(ics_msg: MsgCreateAnyClient) -> Self { - RawMsgCreateClient { - client_state: Some(ics_msg.client_state.into()), - consensus_state: Some(ics_msg.consensus_state.into()), - signer: ics_msg.signer.to_string(), - } - } +impl TryFrom for MsgCreateAnyClient +where + C: ClientTypes, + C::AnyClientState: TryFrom, + C::AnyConsensusState: TryFrom, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(raw: RawMsgCreateClient) -> Result { + let raw_client_state = raw.client_state.ok_or_else(Error::missing_raw_client_state)?; + + let consensus_state = raw + .consensus_state + .and_then(|cs| 
C::AnyConsensusState::try_from(cs).ok()) + .ok_or_else(Error::missing_raw_consensus_state)?; + + MsgCreateAnyClient::new( + C::AnyClientState::try_from(raw_client_state)?, + consensus_state, + raw.signer.parse().map_err(Error::signer)?, + ) + } } -#[cfg(test)] -mod tests { - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::MsgCreateClient; - - use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; - use crate::clients::ics07_tendermint::header::test_util::get_dummy_tendermint_header; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::msgs::MsgCreateAnyClient; - use crate::test_utils::get_dummy_account_id; - - #[test] - fn msg_create_client_serialization() { - let signer = get_dummy_account_id(); - - let tm_header = get_dummy_tendermint_header(); - let tm_client_state = get_dummy_tendermint_client_state(tm_header.clone()); - - let msg = MsgCreateAnyClient::new( - tm_client_state, - AnyConsensusState::Tendermint(tm_header.try_into().unwrap()), - signer, - ) - .unwrap(); - - let raw = MsgCreateClient::from(msg.clone()); - let msg_back = MsgCreateAnyClient::try_from(raw.clone()).unwrap(); - let raw_back = MsgCreateClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } +impl From> for RawMsgCreateClient +where + C: ClientTypes, + Any: From, + Any: From, +{ + fn from(ics_msg: MsgCreateAnyClient) -> Self { + RawMsgCreateClient { + client_state: Some(ics_msg.client_state.into()), + consensus_state: Some(ics_msg.consensus_state.into()), + signer: ics_msg.signer.to_string(), + } + } } diff --git a/modules/src/core/ics02_client/msgs/misbehavior.rs b/modules/src/core/ics02_client/msgs/misbehavior.rs deleted file mode 100644 index 648aaf9d2f..0000000000 --- a/modules/src/core/ics02_client/msgs/misbehavior.rs +++ /dev/null @@ -1,67 +0,0 @@ -use crate::prelude::*; - -use ibc_proto::ibc::core::client::v1::MsgSubmitMisbehaviour as 
RawMsgSubmitMisbehaviour; -use tendermint_proto::Protobuf; - -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; - -pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgSubmitMisbehaviour"; - -/// A type of message that submits client misbehaviour proof. -#[derive(Clone, Debug, PartialEq)] -pub struct MsgSubmitAnyMisbehaviour { - /// client unique identifier - pub client_id: ClientId, - /// misbehaviour used for freezing the light client - pub misbehaviour: AnyMisbehaviour, - /// signer address - pub signer: Signer, -} - -impl Msg for MsgSubmitAnyMisbehaviour { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgSubmitMisbehaviour; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } -} - -impl Protobuf for MsgSubmitAnyMisbehaviour {} - -impl TryFrom for MsgSubmitAnyMisbehaviour { - type Error = Error; - - fn try_from(raw: RawMsgSubmitMisbehaviour) -> Result { - let raw_misbehaviour = raw - .misbehaviour - .ok_or_else(Error::missing_raw_misbehaviour)?; - - Ok(MsgSubmitAnyMisbehaviour { - client_id: raw - .client_id - .parse() - .map_err(Error::invalid_raw_misbehaviour)?, - misbehaviour: AnyMisbehaviour::try_from(raw_misbehaviour)?, - signer: raw.signer.parse().map_err(Error::signer)?, - }) - } -} - -impl From for RawMsgSubmitMisbehaviour { - fn from(ics_msg: MsgSubmitAnyMisbehaviour) -> Self { - RawMsgSubmitMisbehaviour { - client_id: ics_msg.client_id.to_string(), - misbehaviour: Some(ics_msg.misbehaviour.into()), - signer: ics_msg.signer.to_string(), - } - } -} diff --git a/modules/src/core/ics02_client/msgs/update_client.rs b/modules/src/core/ics02_client/msgs/update_client.rs index b7cdc4c53d..089e4ebd44 100644 --- a/modules/src/core/ics02_client/msgs/update_client.rs +++ 
b/modules/src/core/ics02_client/msgs/update_client.rs @@ -1,105 +1,99 @@ //! Definition of domain type message `MsgUpdateAnyClient`. use crate::prelude::*; +use core::fmt::Display; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgUpdateClient as RawMsgUpdateClient; +use crate::core::ics02_client::context::ClientTypes; +use ibc_proto::ibc::core::client::v1::{MsgUpdateClient as RawMsgUpdateClient, MsgUpdateClient}; use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; + +use crate::{ + core::ics24_host::{error::ValidationError, identifier::ClientId}, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpdateClient"; /// A type of message that triggers the update of an on-chain (IBC) client with new headers. 
#[derive(Clone, Debug, PartialEq)] // TODO: Add Eq bound when possible -pub struct MsgUpdateAnyClient { - pub client_id: ClientId, - pub header: AnyHeader, - pub signer: Signer, +pub struct MsgUpdateAnyClient { + pub client_id: ClientId, + pub client_message: C::AnyClientMessage, + pub signer: Signer, } -impl MsgUpdateAnyClient { - pub fn new(client_id: ClientId, header: AnyHeader, signer: Signer) -> Self { - MsgUpdateAnyClient { - client_id, - header, - signer, - } - } +impl MsgUpdateAnyClient +where + C: ClientTypes, +{ + pub fn new(client_id: ClientId, client_message: C::AnyClientMessage, signer: Signer) -> Self { + MsgUpdateAnyClient { client_id, client_message, signer } + } } -impl Msg for MsgUpdateAnyClient { - type ValidationError = ValidationError; - type Raw = RawMsgUpdateClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgUpdateAnyClient +where + C: ClientTypes + Clone, + C::AnyClientMessage: Clone, + Any: From, +{ + type ValidationError = ValidationError; + type Raw = RawMsgUpdateClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgUpdateAnyClient {} - -impl TryFrom for MsgUpdateAnyClient { - type Error = Error; - - fn try_from(raw: RawMsgUpdateClient) -> Result { - let raw_header = raw.header.ok_or_else(Error::missing_raw_header)?; - - Ok(MsgUpdateAnyClient { - client_id: raw - .client_id - .parse() - .map_err(Error::invalid_msg_update_client_id)?, - header: AnyHeader::try_from(raw_header)?, - signer: raw.signer.parse().map_err(Error::signer)?, - }) - } +impl Protobuf for MsgUpdateAnyClient +where + C: ClientTypes + Clone, + C::AnyClientMessage: Clone, + Any: From, + MsgUpdateAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ } -impl From for RawMsgUpdateClient { - fn from(ics_msg: MsgUpdateAnyClient) -> Self { - 
RawMsgUpdateClient { - client_id: ics_msg.client_id.to_string(), - header: Some(ics_msg.header.into()), - signer: ics_msg.signer.to_string(), - } - } +impl TryFrom for MsgUpdateAnyClient +where + C: ClientTypes, + C::AnyClientMessage: TryFrom, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(raw: RawMsgUpdateClient) -> Result { + let raw_client_message = + raw.client_message.ok_or_else(Error::missing_raw_client_message)?; + + Ok(MsgUpdateAnyClient { + client_id: raw.client_id.parse().map_err(Error::invalid_msg_update_client_id)?, + client_message: C::AnyClientMessage::try_from(raw_client_message)?, + signer: raw.signer.parse().map_err(Error::signer)?, + }) + } } -#[cfg(test)] -mod tests { - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::MsgUpdateClient; - - use crate::clients::ics07_tendermint::header::test_util::get_dummy_ics07_header; - use crate::core::ics02_client::header::AnyHeader; - use crate::core::ics02_client::msgs::MsgUpdateAnyClient; - use crate::core::ics24_host::identifier::ClientId; - use crate::test_utils::get_dummy_account_id; - - #[test] - fn msg_update_client_serialization() { - let client_id: ClientId = "tendermint".parse().unwrap(); - let signer = get_dummy_account_id(); - - let header = get_dummy_ics07_header(); - - let msg = MsgUpdateAnyClient::new(client_id, AnyHeader::Tendermint(header), signer); - let raw = MsgUpdateClient::from(msg.clone()); - let msg_back = MsgUpdateAnyClient::try_from(raw.clone()).unwrap(); - let raw_back = MsgUpdateClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } +impl From> for RawMsgUpdateClient +where + C: ClientTypes, + Any: From, +{ + fn from(ics_msg: MsgUpdateAnyClient) -> Self { + RawMsgUpdateClient { + client_id: ics_msg.client_id.to_string(), + client_message: Some(ics_msg.client_message.into()), + signer: ics_msg.signer.to_string(), + } + } } diff --git a/modules/src/core/ics02_client/msgs/upgrade_client.rs 
b/modules/src/core/ics02_client/msgs/upgrade_client.rs index 34ed86ea83..43944a48bf 100644 --- a/modules/src/core/ics02_client/msgs/upgrade_client.rs +++ b/modules/src/core/ics02_client/msgs/upgrade_client.rs @@ -1,211 +1,220 @@ //! Definition of domain type msg `MsgUpgradeAnyClient`. use crate::prelude::*; +use core::fmt::Display; use core::str::FromStr; +use ibc_proto::google::protobuf::Any; use tendermint_proto::Protobuf; -use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; +use ibc_proto::ibc::core::client::v1::{MsgUpgradeClient as RawMsgUpgradeClient, MsgUpgradeClient}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics23_commitment::error::Error as Ics23Error; -use crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ics02_client::error::Error, ics24_host::identifier::ClientId}, + signer::Signer, + tx_msg::Msg, +}; pub(crate) const TYPE_URL: &str = "/ibc.core.client.v1.MsgUpgradeClient"; /// A type of message that triggers the upgrade of an on-chain (IBC) client. 
#[derive(Clone, Debug, PartialEq)] -pub struct MsgUpgradeAnyClient { - pub client_id: ClientId, - pub client_state: AnyClientState, - pub consensus_state: AnyConsensusState, - pub proof_upgrade_client: RawMerkleProof, - pub proof_upgrade_consensus_state: RawMerkleProof, - pub signer: Signer, +pub struct MsgUpgradeAnyClient { + pub client_id: ClientId, + pub client_state: C::AnyClientState, + pub consensus_state: C::AnyConsensusState, + pub proof_upgrade_client: Vec, + pub proof_upgrade_consensus_state: Vec, + pub signer: Signer, } -impl MsgUpgradeAnyClient { - pub fn new( - client_id: ClientId, - client_state: AnyClientState, - consensus_state: AnyConsensusState, - proof_upgrade_client: RawMerkleProof, - proof_upgrade_consensus_state: RawMerkleProof, - signer: Signer, - ) -> Self { - MsgUpgradeAnyClient { - client_id, - client_state, - consensus_state, - proof_upgrade_client, - proof_upgrade_consensus_state, - signer, - } - } +impl MsgUpgradeAnyClient { + pub fn new( + client_id: ClientId, + client_state: C::AnyClientState, + consensus_state: C::AnyConsensusState, + proof_upgrade_client: Vec, + proof_upgrade_consensus_state: Vec, + signer: Signer, + ) -> Self { + MsgUpgradeAnyClient { + client_id, + client_state, + consensus_state, + proof_upgrade_client, + proof_upgrade_consensus_state, + signer, + } + } } -impl Msg for MsgUpgradeAnyClient { - type ValidationError = crate::core::ics24_host::error::ValidationError; - type Raw = RawMsgUpgradeClient; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } +impl Msg for MsgUpgradeAnyClient +where + C: ClientTypes + Clone, + Any: From, + Any: From, +{ + type ValidationError = crate::core::ics24_host::error::ValidationError; + type Raw = RawMsgUpgradeClient; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } +} - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Protobuf for MsgUpgradeAnyClient +where + 
C: ClientTypes + Clone, + Any: From, + Any: From, + MsgUpgradeAnyClient: TryFrom, + as TryFrom>::Error: Display, +{ } -impl Protobuf for MsgUpgradeAnyClient {} - -impl From for RawMsgUpgradeClient { - fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { - let c_bytes = CommitmentProofBytes::try_from(dm_msg.proof_upgrade_client) - .map_or(vec![], |c| c.into()); - let cs_bytes = CommitmentProofBytes::try_from(dm_msg.proof_upgrade_consensus_state) - .map_or(vec![], |c| c.into()); - - RawMsgUpgradeClient { - client_id: dm_msg.client_id.to_string(), - client_state: Some(dm_msg.client_state.into()), - consensus_state: Some(dm_msg.consensus_state.into()), - proof_upgrade_client: c_bytes, - proof_upgrade_consensus_state: cs_bytes, - signer: dm_msg.signer.to_string(), - } - } +impl From> for RawMsgUpgradeClient +where + C: ClientTypes, + Any: From, + Any: From, +{ + fn from(dm_msg: MsgUpgradeAnyClient) -> RawMsgUpgradeClient { + RawMsgUpgradeClient { + client_id: dm_msg.client_id.to_string(), + client_state: Some(dm_msg.client_state.into()), + consensus_state: Some(dm_msg.consensus_state.into()), + proof_upgrade_client: dm_msg.proof_upgrade_client, + proof_upgrade_consensus_state: dm_msg.proof_upgrade_consensus_state, + signer: dm_msg.signer.to_string(), + } + } } -impl TryFrom for MsgUpgradeAnyClient { - type Error = Error; - - fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { - let raw_client_state = proto_msg - .client_state - .ok_or_else(Error::missing_raw_client_state)?; - - let raw_consensus_state = proto_msg - .consensus_state - .ok_or_else(Error::missing_raw_client_state)?; - - let c_bytes = CommitmentProofBytes::try_from(proto_msg.proof_upgrade_client) - .map_err(|_| Error::invalid_upgrade_client_proof(Ics23Error::empty_merkle_proof()))?; - let cs_bytes = CommitmentProofBytes::try_from(proto_msg.proof_upgrade_consensus_state) - .map_err(|_| { - Error::invalid_upgrade_consensus_state_proof(Ics23Error::empty_merkle_proof()) - })?; - - 
Ok(MsgUpgradeAnyClient { - client_id: ClientId::from_str(&proto_msg.client_id) - .map_err(Error::invalid_client_identifier)?, - client_state: AnyClientState::try_from(raw_client_state)?, - consensus_state: AnyConsensusState::try_from(raw_consensus_state)?, - proof_upgrade_client: RawMerkleProof::try_from(c_bytes) - .map_err(Error::invalid_upgrade_client_proof)?, - proof_upgrade_consensus_state: RawMerkleProof::try_from(cs_bytes) - .map_err(Error::invalid_upgrade_consensus_state_proof)?, - signer: proto_msg.signer.parse().map_err(Error::signer)?, - }) - } +impl TryFrom for MsgUpgradeAnyClient +where + C: ClientTypes, + C::AnyClientState: TryFrom, + C::AnyConsensusState: TryFrom, + Error: From<>::Error>, + Error: From<>::Error>, +{ + type Error = Error; + + fn try_from(proto_msg: RawMsgUpgradeClient) -> Result { + let raw_client_state = + proto_msg.client_state.ok_or_else(Error::missing_raw_client_state)?; + + let raw_consensus_state = + proto_msg.consensus_state.ok_or_else(Error::missing_raw_client_state)?; + + Ok(MsgUpgradeAnyClient { + client_id: ClientId::from_str(&proto_msg.client_id) + .map_err(Error::invalid_client_identifier)?, + client_state: C::AnyClientState::try_from(raw_client_state)?, + consensus_state: C::AnyConsensusState::try_from(raw_consensus_state)?, + proof_upgrade_client: proto_msg.proof_upgrade_client, + proof_upgrade_consensus_state: proto_msg.proof_upgrade_consensus_state, + signer: Signer::from_str(proto_msg.signer.as_str()).map_err(Error::signer)?, + }) + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; - - use crate::{ - core::{ - ics02_client::{ - client_consensus::AnyConsensusState, client_state::AnyClientState, height::Height, - }, - ics24_host::identifier::ClientId, - }, - mock::{ - client_state::{MockClientState, MockConsensusState}, - header::MockHeader, - }, - test_utils::{get_dummy_bech32_account, get_dummy_proof}, - }; - - use super::MsgUpgradeAnyClient; - - /// 
Extends the implementation with additional helper methods. - impl MsgUpgradeAnyClient { - /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. - pub fn with_client_id(self, client_id: ClientId) -> Self { - MsgUpgradeAnyClient { client_id, ..self } - } - } - - /// Returns a dummy `RawMsgUpgradeClient`, for testing only! - pub fn get_dummy_raw_msg_upgrade_client(height: Height) -> RawMsgUpgradeClient { - RawMsgUpgradeClient { - client_id: "tendermint".parse().unwrap(), - client_state: Some( - AnyClientState::Mock(MockClientState::new(MockHeader::new(height))).into(), - ), - consensus_state: Some( - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), - ), - proof_upgrade_client: get_dummy_proof(), - proof_upgrade_consensus_state: get_dummy_proof(), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; + + use crate::{ + core::{ics02_client::height::Height, ics24_host::identifier::ClientId}, + mock::{ + client_state::{ + AnyClientState, AnyConsensusState, MockClientState, MockConsensusState, + }, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + use super::MsgUpgradeAnyClient; + + /// Extends the implementation with additional helper methods. + impl MsgUpgradeAnyClient> { + /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. + pub fn with_client_id(self, client_id: ClientId) -> Self { + MsgUpgradeAnyClient { client_id, ..self } + } + } + + /// Returns a dummy `RawMsgUpgradeClient`, for testing only! 
+ pub fn get_dummy_raw_msg_upgrade_client(height: Height) -> RawMsgUpgradeClient { + RawMsgUpgradeClient { + client_id: "tendermint".parse().unwrap(), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::new(height).into())).into(), + ), + consensus_state: Some( + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))).into(), + ), + proof_upgrade_client: get_dummy_proof(), + proof_upgrade_consensus_state: get_dummy_proof(), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; - - use crate::{ - core::{ - ics02_client::{ - client_consensus::AnyConsensusState, client_state::AnyClientState, height::Height, - msgs::upgrade_client::MsgUpgradeAnyClient, - }, - ics23_commitment::commitment::test_util::get_dummy_merkle_proof, - ics24_host::identifier::ClientId, - }, - mock::{ - client_state::{MockClientState, MockConsensusState}, - header::MockHeader, - }, - test_utils::get_dummy_account_id, - }; - - #[test] - fn msg_upgrade_client_serialization() { - let client_id: ClientId = "tendermint".parse().unwrap(); - let signer = get_dummy_account_id(); - - let height = Height::new(1, 1); - - let client_state = AnyClientState::Mock(MockClientState::new(MockHeader::new(height))); - let consensus_state = - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); - - let proof = get_dummy_merkle_proof(); - - let msg = MsgUpgradeAnyClient::new( - client_id, - client_state, - consensus_state, - proof.clone(), - proof, - signer, - ); - let raw: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg.clone()); - let msg_back = MsgUpgradeAnyClient::try_from(raw.clone()).unwrap(); - let raw_back: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg_back.clone()); - assert_eq!(msg, msg_back); - assert_eq!(raw, raw_back); - } + use alloc::vec::Vec; + use ibc_proto::ibc::core::client::v1::MsgUpgradeClient as RawMsgUpgradeClient; + + use 
crate::{ + core::{ + ics02_client::{height::Height, msgs::upgrade_client::MsgUpgradeAnyClient}, + ics23_commitment::commitment::test_util::get_dummy_merkle_proof, + ics24_host::identifier::ClientId, + }, + mock::{ + client_state::{ + AnyClientState, AnyConsensusState, MockClientState, MockConsensusState, + }, + context::{MockClientTypes, MockContext}, + header::MockHeader, + }, + test_utils::get_dummy_account_id, + }; + + #[test] + fn msg_upgrade_client_serialization() { + let client_id: ClientId = "tendermint".parse().unwrap(); + let signer = get_dummy_account_id(); + + let height = Height::new(1, 1); + + let client_state = + AnyClientState::Mock(MockClientState::new(MockHeader::new(height).into())); + let consensus_state = + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(height))); + + let proof = get_dummy_merkle_proof(); + let mut proof_buf = Vec::new(); + prost::Message::encode(&proof, &mut proof_buf).unwrap(); + let msg = MsgUpgradeAnyClient::>::new( + client_id, + client_state, + consensus_state, + proof_buf.clone(), + proof_buf, + signer, + ); + let raw: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg.clone()); + let msg_back = MsgUpgradeAnyClient::try_from(raw.clone()).unwrap(); + let raw_back: RawMsgUpgradeClient = RawMsgUpgradeClient::from(msg_back.clone()); + assert_eq!(msg, msg_back); + assert_eq!(raw, raw_back); + } } diff --git a/modules/src/core/ics02_client/trust_threshold.rs b/modules/src/core/ics02_client/trust_threshold.rs index ca5db5e2f8..5cd70ab8fe 100644 --- a/modules/src/core/ics02_client/trust_threshold.rs +++ b/modules/src/core/ics02_client/trust_threshold.rs @@ -22,109 +22,91 @@ use crate::core::ics02_client::error::Error; /// which is used in the client state of an upgrading client. 
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct TrustThreshold { - numerator: u64, - denominator: u64, + numerator: u64, + denominator: u64, } impl TrustThreshold { - /// Constant for a trust threshold of 1/3. - pub const ONE_THIRD: Self = Self { - numerator: 1, - denominator: 3, - }; - - /// Constant for a trust threshold of 2/3. - pub const TWO_THIRDS: Self = Self { - numerator: 2, - denominator: 3, - }; - - /// Constant for a trust threshold of 0/0. - pub const ZERO: Self = Self { - numerator: 0, - denominator: 0, - }; - - /// Instantiate a TrustThreshold with the given denominator and - /// numerator. - /// - /// The constructor succeeds if long as the resulting fraction - /// is in the range`[0, 1)`. - pub fn new(numerator: u64, denominator: u64) -> Result { - // The two parameters cannot yield a fraction that is bigger or equal to 1 - if (numerator > denominator) - || (denominator == 0 && numerator != 0) - || (numerator == denominator && numerator != 0) - { - return Err(Error::invalid_trust_threshold(numerator, denominator)); - } - - Ok(Self { - numerator, - denominator, - }) - } - - /// The numerator of the fraction underlying this trust threshold. - pub fn numerator(&self) -> u64 { - self.numerator - } - - /// The denominator of the fraction underlying this trust threshold. - pub fn denominator(&self) -> u64 { - self.denominator - } + /// Constant for a trust threshold of 1/3. + pub const ONE_THIRD: Self = Self { numerator: 1, denominator: 3 }; + + /// Constant for a trust threshold of 2/3. + pub const TWO_THIRDS: Self = Self { numerator: 2, denominator: 3 }; + + /// Constant for a trust threshold of 0/0. + pub const ZERO: Self = Self { numerator: 0, denominator: 0 }; + + /// Instantiate a TrustThreshold with the given denominator and + /// numerator. + /// + /// The constructor succeeds if long as the resulting fraction + /// is in the range`[0, 1)`. 
+ pub fn new(numerator: u64, denominator: u64) -> Result { + // The two parameters cannot yield a fraction that is bigger or equal to 1 + if (numerator > denominator) || + (denominator == 0 && numerator != 0) || + (numerator == denominator && numerator != 0) + { + return Err(Error::invalid_trust_threshold(numerator, denominator)) + } + + Ok(Self { numerator, denominator }) + } + + /// The numerator of the fraction underlying this trust threshold. + pub fn numerator(&self) -> u64 { + self.numerator + } + + /// The denominator of the fraction underlying this trust threshold. + pub fn denominator(&self) -> u64 { + self.denominator + } } /// Conversion from Tendermint domain type into /// IBC domain type. impl From for TrustThreshold { - fn from(t: TrustThresholdFraction) -> Self { - Self { - numerator: t.numerator(), - denominator: t.denominator(), - } - } + fn from(t: TrustThresholdFraction) -> Self { + Self { numerator: t.numerator(), denominator: t.denominator() } + } } /// Conversion from IBC domain type into /// Tendermint domain type. 
impl TryFrom for TrustThresholdFraction { - type Error = Error; + type Error = Error; - fn try_from(t: TrustThreshold) -> Result { - Self::new(t.numerator, t.denominator) - .map_err(|e| Error::failed_trust_threshold_conversion(t.numerator, t.denominator, e)) - } + fn try_from(t: TrustThreshold) -> Result { + Self::new(t.numerator, t.denominator) + .map_err(|e| Error::failed_trust_threshold_conversion(t.numerator, t.denominator, e)) + } } impl Protobuf for TrustThreshold {} impl From for Fraction { - fn from(t: TrustThreshold) -> Self { - Self { - numerator: t.numerator, - denominator: t.denominator, - } - } + fn from(t: TrustThreshold) -> Self { + Self { numerator: t.numerator, denominator: t.denominator } + } } impl TryFrom for TrustThreshold { - type Error = Error; + type Error = Error; - fn try_from(value: Fraction) -> Result { - Self::new(value.numerator, value.denominator) - } + fn try_from(value: Fraction) -> Result { + Self::new(value.numerator, value.denominator) + } } impl Default for TrustThreshold { - fn default() -> Self { - Self::ONE_THIRD - } + fn default() -> Self { + Self::ONE_THIRD + } } impl fmt::Display for TrustThreshold { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}/{}", self.numerator, self.denominator) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/{}", self.numerator, self.denominator) + } } diff --git a/modules/src/core/ics03_connection/connection.rs b/modules/src/core/ics03_connection/connection.rs index 793501f35b..ad29b1e9b0 100644 --- a/modules/src/core/ics03_connection/connection.rs +++ b/modules/src/core/ics03_connection/connection.rs @@ -1,248 +1,230 @@ use crate::prelude::*; -use core::str::FromStr; -use core::time::Duration; -use core::{fmt, u64}; +use core::{fmt, str::FromStr, time::Duration, u64}; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; use ibc_proto::ibc::core::connection::v1::{ - ConnectionEnd as RawConnectionEnd, Counterparty as 
RawCounterparty, - IdentifiedConnection as RawIdentifiedConnection, + ConnectionEnd as RawConnectionEnd, Counterparty as RawCounterparty, + IdentifiedConnection as RawIdentifiedConnection, }; -use crate::core::ics02_client::error::Error as ClientError; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::timestamp::ZERO_DURATION; +use crate::{ + core::{ + ics02_client::error::Error as ClientError, + ics03_connection::{error::Error, version::Version}, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::{ + error::ValidationError, + identifier::{ClientId, ConnectionId}, + }, + }, + timestamp::ZERO_DURATION, +}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct IdentifiedConnectionEnd { - pub connection_id: ConnectionId, - pub connection_end: ConnectionEnd, + pub connection_id: ConnectionId, + pub connection_end: ConnectionEnd, } impl IdentifiedConnectionEnd { - pub fn new(connection_id: ConnectionId, connection_end: ConnectionEnd) -> Self { - IdentifiedConnectionEnd { - connection_id, - connection_end, - } - } - - pub fn id(&self) -> &ConnectionId { - &self.connection_id - } - - pub fn end(&self) -> &ConnectionEnd { - &self.connection_end - } + pub fn new(connection_id: ConnectionId, connection_end: ConnectionEnd) -> Self { + IdentifiedConnectionEnd { connection_id, connection_end } + } + + pub fn id(&self) -> &ConnectionId { + &self.connection_id + } + + pub fn end(&self) -> &ConnectionEnd { + &self.connection_end + } } impl Protobuf for IdentifiedConnectionEnd {} impl TryFrom for IdentifiedConnectionEnd { - type Error = Error; - - fn try_from(value: RawIdentifiedConnection) -> Result { - let raw_connection_end = RawConnectionEnd { - client_id: value.client_id.to_string(), - 
versions: value.versions, - state: value.state, - counterparty: value.counterparty, - delay_period: value.delay_period, - }; - - Ok(IdentifiedConnectionEnd { - connection_id: value.id.parse().map_err(Error::invalid_identifier)?, - connection_end: raw_connection_end.try_into()?, - }) - } + type Error = Error; + + fn try_from(value: RawIdentifiedConnection) -> Result { + let raw_connection_end = RawConnectionEnd { + client_id: value.client_id.to_string(), + versions: value.versions, + state: value.state, + counterparty: value.counterparty, + delay_period: value.delay_period, + }; + + Ok(IdentifiedConnectionEnd { + connection_id: value.id.parse().map_err(Error::invalid_identifier)?, + connection_end: raw_connection_end.try_into()?, + }) + } } impl From for RawIdentifiedConnection { - fn from(value: IdentifiedConnectionEnd) -> Self { - RawIdentifiedConnection { - id: value.connection_id.to_string(), - client_id: value.connection_end.client_id.to_string(), - versions: value - .connection_end - .versions - .iter() - .map(|v| From::from(v.clone())) - .collect(), - state: value.connection_end.state as i32, - delay_period: value.connection_end.delay_period.as_nanos() as u64, - counterparty: Some(value.connection_end.counterparty().clone().into()), - } - } + fn from(value: IdentifiedConnectionEnd) -> Self { + RawIdentifiedConnection { + id: value.connection_id.to_string(), + client_id: value.connection_end.client_id.to_string(), + versions: value.connection_end.versions.iter().map(|v| From::from(v.clone())).collect(), + state: value.connection_end.state as i32, + delay_period: value.connection_end.delay_period.as_nanos() as u64, + counterparty: Some(value.connection_end.counterparty().clone().into()), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct ConnectionEnd { - pub state: State, - client_id: ClientId, - counterparty: Counterparty, - versions: Vec, - delay_period: Duration, + pub state: State, + client_id: ClientId, + 
counterparty: Counterparty, + versions: Vec, + delay_period: Duration, } impl Default for ConnectionEnd { - fn default() -> Self { - Self { - state: State::Uninitialized, - client_id: Default::default(), - counterparty: Default::default(), - versions: Vec::new(), - delay_period: ZERO_DURATION, - } - } + fn default() -> Self { + Self { + state: State::Uninitialized, + client_id: Default::default(), + counterparty: Default::default(), + versions: Vec::new(), + delay_period: ZERO_DURATION, + } + } } impl Protobuf for ConnectionEnd {} impl TryFrom for ConnectionEnd { - type Error = Error; - fn try_from(value: RawConnectionEnd) -> Result { - let state = value.state.try_into()?; - if state == State::Uninitialized { - return Ok(ConnectionEnd::default()); - } - if value.client_id.is_empty() { - return Err(Error::empty_proto_connection_end()); - } - - Ok(Self::new( - state, - value.client_id.parse().map_err(Error::invalid_identifier)?, - value - .counterparty - .ok_or_else(Error::missing_counterparty)? 
- .try_into()?, - value - .versions - .into_iter() - .map(Version::try_from) - .collect::, _>>()?, - Duration::from_nanos(value.delay_period), - )) - } + type Error = Error; + fn try_from(value: RawConnectionEnd) -> Result { + let state = value.state.try_into()?; + if state == State::Uninitialized { + return Ok(ConnectionEnd::default()) + } + if value.client_id.is_empty() { + return Err(Error::empty_proto_connection_end()) + } + + Ok(Self::new( + state, + value.client_id.parse().map_err(Error::invalid_identifier)?, + value.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + value + .versions + .into_iter() + .map(Version::try_from) + .collect::, _>>()?, + Duration::from_nanos(value.delay_period), + )) + } } impl From for RawConnectionEnd { - fn from(value: ConnectionEnd) -> Self { - RawConnectionEnd { - client_id: value.client_id.to_string(), - versions: value - .versions - .iter() - .map(|v| From::from(v.clone())) - .collect(), - state: value.state as i32, - counterparty: Some(value.counterparty.into()), - delay_period: value.delay_period.as_nanos() as u64, - } - } + fn from(value: ConnectionEnd) -> Self { + RawConnectionEnd { + client_id: value.client_id.to_string(), + versions: value.versions.iter().map(|v| From::from(v.clone())).collect(), + state: value.state as i32, + counterparty: Some(value.counterparty.into()), + delay_period: value.delay_period.as_nanos() as u64, + } + } } impl ConnectionEnd { - pub fn new( - state: State, - client_id: ClientId, - counterparty: Counterparty, - versions: Vec, - delay_period: Duration, - ) -> Self { - Self { - state, - client_id, - counterparty, - versions, - delay_period, - } - } - - /// Getter for the state of this connection end. - pub fn state(&self) -> &State { - &self.state - } - - /// Setter for the `state` field. - pub fn set_state(&mut self, new_state: State) { - self.state = new_state; - } - - /// Setter for the `counterparty` field. 
- pub fn set_counterparty(&mut self, new_cparty: Counterparty) { - self.counterparty = new_cparty; - } - - /// Setter for the `version` field. - pub fn set_version(&mut self, new_version: Version) { - self.versions = vec![new_version]; - } - - /// Helper function to compare the counterparty of this end with another counterparty. - pub fn counterparty_matches(&self, other: &Counterparty) -> bool { - self.counterparty.eq(other) - } - - /// Helper function to compare the client id of this end with another client identifier. - pub fn client_id_matches(&self, other: &ClientId) -> bool { - self.client_id.eq(other) - } - - /// Helper function to determine whether the connection is open. - pub fn is_open(&self) -> bool { - self.state_matches(&State::Open) - } - - /// Helper function to determine whether the connection is uninitialized. - pub fn is_uninitialized(&self) -> bool { - self.state_matches(&State::Uninitialized) - } - - /// Helper function to compare the state of this end with another state. - pub fn state_matches(&self, other: &State) -> bool { - self.state.eq(other) - } - - /// Getter for the client id on the local party of this connection end. - pub fn client_id(&self) -> &ClientId { - &self.client_id - } - - /// Getter for the list of versions in this connection end. - pub fn versions(&self) -> &[Version] { - &self.versions - } - - /// Getter for the counterparty. - pub fn counterparty(&self) -> &Counterparty { - &self.counterparty - } - - /// Getter for the delay_period field. This represents the duration, at minimum, - /// to delay the sending of a packet after the client update for that packet has been submitted. - pub fn delay_period(&self) -> Duration { - self.delay_period - } - - /// TODO: Clean this up, probably not necessary. 
- pub fn validate_basic(&self) -> Result<(), ValidationError> { - self.counterparty.validate_basic() - } + pub fn new( + state: State, + client_id: ClientId, + counterparty: Counterparty, + versions: Vec, + delay_period: Duration, + ) -> Self { + Self { state, client_id, counterparty, versions, delay_period } + } + + /// Getter for the state of this connection end. + pub fn state(&self) -> &State { + &self.state + } + + /// Setter for the `state` field. + pub fn set_state(&mut self, new_state: State) { + self.state = new_state; + } + + /// Setter for the `counterparty` field. + pub fn set_counterparty(&mut self, new_cparty: Counterparty) { + self.counterparty = new_cparty; + } + + /// Setter for the `version` field. + pub fn set_version(&mut self, new_version: Version) { + self.versions = vec![new_version]; + } + + /// Helper function to compare the counterparty of this end with another counterparty. + pub fn counterparty_matches(&self, other: &Counterparty) -> bool { + self.counterparty.eq(other) + } + + /// Helper function to compare the client id of this end with another client identifier. + pub fn client_id_matches(&self, other: &ClientId) -> bool { + self.client_id.eq(other) + } + + /// Helper function to determine whether the connection is open. + pub fn is_open(&self) -> bool { + self.state_matches(&State::Open) + } + + /// Helper function to determine whether the connection is uninitialized. + pub fn is_uninitialized(&self) -> bool { + self.state_matches(&State::Uninitialized) + } + + /// Helper function to compare the state of this end with another state. + pub fn state_matches(&self, other: &State) -> bool { + self.state.eq(other) + } + + /// Getter for the client id on the local party of this connection end. + pub fn client_id(&self) -> &ClientId { + &self.client_id + } + + /// Getter for the list of versions in this connection end. + pub fn versions(&self) -> &[Version] { + &self.versions + } + + /// Getter for the counterparty. 
+ pub fn counterparty(&self) -> &Counterparty { + &self.counterparty + } + + /// Getter for the delay_period field. This represents the duration, at minimum, + /// to delay the sending of a packet after the client update for that packet has been submitted. + pub fn delay_period(&self) -> Duration { + self.delay_period + } + + /// TODO: Clean this up, probably not necessary. + pub fn validate_basic(&self) -> Result<(), ValidationError> { + self.counterparty.validate_basic() + } } #[derive(Clone, Debug, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Counterparty { - client_id: ClientId, - pub connection_id: Option, - prefix: CommitmentPrefix, + client_id: ClientId, + pub connection_id: Option, + prefix: CommitmentPrefix, } impl Protobuf for Counterparty {} @@ -250,143 +232,139 @@ impl Protobuf for Counterparty {} // Converts from the wire format RawCounterparty. Typically used from the relayer side // during queries for response validation and to extract the Counterparty structure. impl TryFrom for Counterparty { - type Error = Error; - - fn try_from(value: RawCounterparty) -> Result { - let connection_id = Some(value.connection_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::invalid_identifier)?; - Ok(Counterparty::new( - value.client_id.parse().map_err(Error::invalid_identifier)?, - connection_id, - value - .prefix - .ok_or_else(Error::missing_counterparty)? - .key_prefix - .try_into() - .map_err(|_| Error::ics02_client(ClientError::empty_prefix()))?, - )) - } + type Error = Error; + + fn try_from(value: RawCounterparty) -> Result { + let connection_id = Some(value.connection_id) + .filter(|x| !x.is_empty()) + .map(|v| FromStr::from_str(v.as_str())) + .transpose() + .map_err(Error::invalid_identifier)?; + Ok(Counterparty::new( + value.client_id.parse().map_err(Error::invalid_identifier)?, + connection_id, + value + .prefix + .ok_or_else(Error::missing_counterparty)? 
+ .key_prefix + .try_into() + .map_err(|_| Error::ics02_client(ClientError::empty_prefix()))?, + )) + } } impl From for RawCounterparty { - fn from(value: Counterparty) -> Self { - RawCounterparty { - client_id: value.client_id.as_str().to_string(), - connection_id: value - .connection_id - .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), - prefix: Some(ibc_proto::ibc::core::commitment::v1::MerklePrefix { - key_prefix: value.prefix.into_vec(), - }), - } - } + fn from(value: Counterparty) -> Self { + RawCounterparty { + client_id: value.client_id.as_str().to_string(), + connection_id: value + .connection_id + .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), + prefix: Some(ibc_proto::ibc::core::commitment::v1::MerklePrefix { + key_prefix: value.prefix.into_vec(), + }), + } + } } impl Counterparty { - pub fn new( - client_id: ClientId, - connection_id: Option, - prefix: CommitmentPrefix, - ) -> Self { - Self { - client_id, - connection_id, - prefix, - } - } - - /// Getter for the client id. - pub fn client_id(&self) -> &ClientId { - &self.client_id - } - - /// Getter for connection id. - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.connection_id.as_ref() - } - - pub fn prefix(&self) -> &CommitmentPrefix { - &self.prefix - } - - pub fn validate_basic(&self) -> Result<(), ValidationError> { - Ok(()) - } + pub fn new( + client_id: ClientId, + connection_id: Option, + prefix: CommitmentPrefix, + ) -> Self { + Self { client_id, connection_id, prefix } + } + + /// Getter for the client id. + pub fn client_id(&self) -> &ClientId { + &self.client_id + } + + /// Getter for connection id. 
+ pub fn connection_id(&self) -> Option<&ConnectionId> { + self.connection_id.as_ref() + } + + pub fn prefix(&self) -> &CommitmentPrefix { + &self.prefix + } + + pub fn validate_basic(&self) -> Result<(), ValidationError> { + Ok(()) + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub enum State { - Uninitialized = 0, - Init = 1, - TryOpen = 2, - Open = 3, + Uninitialized = 0, + Init = 1, + TryOpen = 2, + Open = 3, } impl State { - /// Yields the State as a string. - pub fn as_str(&self) -> &'static str { - match self { - Self::Uninitialized => "UNINITIALIZED", - Self::Init => "INIT", - Self::TryOpen => "TRYOPEN", - Self::Open => "OPEN", - } - } - - /// Parses the State out from a i32. - pub fn from_i32(s: i32) -> Result { - match s { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - _ => Err(Error::invalid_state(s)), - } - } - - /// Returns whether or not this connection state is `Open`. - pub fn is_open(self) -> bool { - self == State::Open - } - - /// Returns whether or not this connection with this state - /// has progressed less or the same than the argument. - /// - /// # Example - /// ```rust,ignore - /// assert!(State::Init.less_or_equal_progress(State::Open)); - /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); - /// assert!(!State::Open.less_or_equal_progress(State::Uninitialized)); - /// ``` - pub fn less_or_equal_progress(self, other: Self) -> bool { - self as u32 <= other as u32 - } + /// Yields the State as a string. + pub fn as_str(&self) -> &'static str { + match self { + Self::Uninitialized => "UNINITIALIZED", + Self::Init => "INIT", + Self::TryOpen => "TRYOPEN", + Self::Open => "OPEN", + } + } + + /// Parses the State out from a i32. 
+ pub fn from_i32(s: i32) -> Result { + match s { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + _ => Err(Error::invalid_state(s)), + } + } + + /// Returns whether or not this connection state is `Open`. + pub fn is_open(self) -> bool { + self == State::Open + } + + /// Returns whether or not this connection with this state + /// has progressed less or the same than the argument. + /// + /// # Example + /// ```rust,ignore + /// assert!(State::Init.less_or_equal_progress(State::Open)); + /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); + /// assert!(!State::Open.less_or_equal_progress(State::Uninitialized)); + /// ``` + pub fn less_or_equal_progress(self, other: Self) -> bool { + self as u32 <= other as u32 + } } impl fmt::Display for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } } impl TryFrom for State { - type Error = Error; - fn try_from(value: i32) -> Result { - match value { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - _ => Err(Error::invalid_state(value)), - } - } + type Error = Error; + fn try_from(value: i32) -> Result { + match value { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + _ => Err(Error::invalid_state(value)), + } + } } impl From for i32 { - fn from(value: State) -> Self { - value.into() - } + fn from(value: State) -> Self { + value.into() + } } diff --git a/modules/src/core/ics03_connection/context.rs b/modules/src/core/ics03_connection/context.rs index 3a29f23a6d..bbb8f78dfc 100644 --- a/modules/src/core/ics03_connection/context.rs +++ b/modules/src/core/ics03_connection/context.rs @@ -2,103 +2,91 @@ //! the interface that any host chain must implement to be able to process any `ConnectionMsg`. 
//! See "ADR 003: IBC protocol implementation" for more details. -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::version::{get_compatible_versions, pick_version, Version}; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::prelude::*; -use crate::Height; +use crate::{ + core::{ + ics03_connection::{ + connection::ConnectionEnd, + error::Error, + handler::{ConnectionIdState, ConnectionResult}, + version::{get_compatible_versions, pick_version, Version}, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + prelude::*, + Height, +}; /// A context supplying all the necessary read-only dependencies for processing any `ConnectionMsg`. pub trait ConnectionReader { - /// Returns the ConnectionEnd for the given identifier `conn_id`. - fn connection_end(&self, conn_id: &ConnectionId) -> Result; - - /// Returns the ClientState for the given identifier `client_id`. - fn client_state(&self, client_id: &ClientId) -> Result; - - /// Returns the current height of the local chain. - fn host_current_height(&self) -> Height; - - /// Returns the oldest height available on the local chain. - fn host_oldest_height(&self) -> Height; - - /// Returns the prefix that the local chain uses in the KV store. - fn commitment_prefix(&self) -> CommitmentPrefix; - - /// Returns the ConsensusState that the given client stores at a specific height. - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - - /// Returns the ConsensusState of the host (local) chain at a specific height. 
- fn host_consensus_state(&self, height: Height) -> Result; - - /// Function required by ICS 03. Returns the list of all possible versions that the connection - /// handshake protocol supports. - fn get_compatible_versions(&self) -> Vec { - get_compatible_versions() - } - - /// Function required by ICS 03. Returns one version out of the supplied list of versions, which the - /// connection handshake protocol prefers. - fn pick_version( - &self, - supported_versions: Vec, - counterparty_candidate_versions: Vec, - ) -> Result { - pick_version(supported_versions, counterparty_candidate_versions) - } - - /// Returns a counter on how many connections have been created thus far. - /// The value of this counter should increase only via method - /// `ConnectionKeeper::increase_connection_counter`. - fn connection_counter(&self) -> Result; + /// Returns the ConnectionEnd for the given identifier `conn_id`. + fn connection_end(&self, conn_id: &ConnectionId) -> Result; + + /// Returns the oldest height available on the local chain. + fn host_oldest_height(&self) -> Height; + + /// Returns the prefix that the local chain uses in the KV store. + fn commitment_prefix(&self) -> CommitmentPrefix; + + /// Function required by ICS 03. Returns the list of all possible versions that the connection + /// handshake protocol supports. + fn get_compatible_versions(&self) -> Vec { + get_compatible_versions() + } + + /// Function required by ICS 03. Returns one version out of the supplied list of versions, which + /// the connection handshake protocol prefers. + fn pick_version( + &self, + supported_versions: Vec, + counterparty_candidate_versions: Vec, + ) -> Result { + pick_version(supported_versions, counterparty_candidate_versions) + } + + /// Returns a counter on how many connections have been created thus far. + /// The value of this counter should increase only via method + /// `ConnectionKeeper::increase_connection_counter`. 
+ fn connection_counter(&self) -> Result; } /// A context supplying all the necessary write-only dependencies (i.e., storage writing facility) /// for processing any `ConnectionMsg`. pub trait ConnectionKeeper { - fn store_connection_result(&mut self, result: ConnectionResult) -> Result<(), Error> { - self.store_connection(result.connection_id.clone(), &result.connection_end)?; - - // If we generated an identifier, increase the counter & associate this new identifier - // with the client id. - if matches!(result.connection_id_state, ConnectionIdState::Generated) { - self.increase_connection_counter(); - - // Also associate the connection end to its client identifier. - self.store_connection_to_client( - result.connection_id.clone(), - result.connection_end.client_id(), - )?; - } - - Ok(()) - } - - /// Stores the given connection_end at a path associated with the connection_id. - fn store_connection( - &mut self, - connection_id: ConnectionId, - connection_end: &ConnectionEnd, - ) -> Result<(), Error>; - - /// Stores the given connection_id at a path associated with the client_id. - fn store_connection_to_client( - &mut self, - connection_id: ConnectionId, - client_id: &ClientId, - ) -> Result<(), Error>; - - /// Called upon connection identifier creation (Init or Try process). - /// Increases the counter which keeps track of how many connections have been created. - /// Should never fail. - fn increase_connection_counter(&mut self); + fn store_connection_result(&mut self, result: ConnectionResult) -> Result<(), Error> { + self.store_connection(result.connection_id.clone(), &result.connection_end)?; + + // If we generated an identifier, increase the counter & associate this new identifier + // with the client id. + if matches!(result.connection_id_state, ConnectionIdState::Generated) { + self.increase_connection_counter(); + + // Also associate the connection end to its client identifier. 
+ self.store_connection_to_client( + result.connection_id.clone(), + result.connection_end.client_id(), + )?; + } + + Ok(()) + } + + /// Stores the given connection_end at a path associated with the connection_id. + fn store_connection( + &mut self, + connection_id: ConnectionId, + connection_end: &ConnectionEnd, + ) -> Result<(), Error>; + + /// Stores the given connection_id at a path associated with the client_id. + fn store_connection_to_client( + &mut self, + connection_id: ConnectionId, + client_id: &ClientId, + ) -> Result<(), Error>; + + /// Called upon connection identifier creation (Init or Try process). + /// Increases the counter which keeps track of how many connections have been created. + /// Should never fail. + fn increase_connection_counter(&mut self); } diff --git a/modules/src/core/ics03_connection/error.rs b/modules/src/core/ics03_connection/error.rs index 619f14b2e1..e4fce3d2dc 100644 --- a/modules/src/core/ics03_connection/error.rs +++ b/modules/src/core/ics03_connection/error.rs @@ -1,161 +1,169 @@ -use crate::core::ics02_client::error as client_error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::proofs::ProofError; -use crate::signer::SignerError; -use crate::Height; +use crate::{ + core::{ + ics02_client::error as client_error, + ics03_connection::version::Version, + ics24_host::{ + error::ValidationError, + identifier::{ClientId, ConnectionId}, + }, + }, + prelude::*, + proofs::ProofError, + signer::SignerError, + Height, +}; use flex_error::define_error; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - Ics02Client - [ client_error::Error ] - | _ | { "ics02 client error" }, - - InvalidState - { state: i32 } - | e | { format_args!("connection state is unknown: {}", e.state) }, - - ConnectionExistsAlready - { connection_id: ConnectionId } - | e | { - format_args!("connection exists (was initialized) already: {0}", - e.connection_id) - }, - - ConnectionMismatch - { connection_id: ConnectionId } - | e | { - format_args!("connection end for identifier {0} was never initialized", - e.connection_id) - }, - - InvalidConsensusHeight - { - target_height: Height, - currrent_height: Height - } - | e | { - format_args!("consensus height claimed by the client on the other party is too advanced: {0} (host chain current height: {1})", - e.target_height, e.currrent_height) - }, - - StaleConsensusHeight - { - target_height: Height, - oldest_height: Height - } - | e | { - format_args!("consensus height claimed by the client on the other party has been pruned: {0} (host chain oldest height: {1})", - e.target_height, e.oldest_height) - }, - - InvalidIdentifier - [ ValidationError ] - | _ | { "identifier error" }, - - EmptyProtoConnectionEnd - | _ | { "ConnectionEnd domain object could not be constructed out of empty proto object" }, - - EmptyVersions - | _ | { "empty supported versions" }, - - EmptyFeatures - | _ | { "empty supported features" }, - - NoCommonVersion - | _ | { "no common version" }, - - VersionNotSupported - { - version: Version, - } - | e | { format_args!("version \"{}\" not supported", e.version) }, - - InvalidAddress - | _ | { "invalid address" }, - - MissingProofHeight - | _ | { "missing proof height" }, - - MissingConsensusHeight - | _ | { "missing consensus height" }, - - InvalidProof - [ ProofError ] - | _ | { "invalid connection proof" }, - - VerifyConnectionState - [ client_error::Error ] - | _ | { "error verifying connnection state" }, - - Signer - [ SignerError ] - | _ | { "invalid signer" }, - - 
ConnectionNotFound - { connection_id: ConnectionId } - | e | { - format_args!("no connection was found for the previous connection id provided {0}", - e.connection_id) - }, - - InvalidCounterparty - | _ | { "invalid signer" }, - - ConnectionIdMismatch - { - connection_id: ConnectionId, - counterparty_connection_id: ConnectionId, - } - | e | { - format_args!("counterparty chosen connection id {0} is different than the connection id {1}", - e.connection_id, e.counterparty_connection_id) - }, - - MissingCounterparty - | _ | { "missing counterparty" }, - - - MissingCounterpartyPrefix - | _ | { "missing counterparty prefix" }, - - NullClientProof - | _ | { "client proof must be present" }, - - FrozenClient - { client_id: ClientId } - | e | { - format_args!("the client id does not match any client state: {0}", - e.client_id) - }, - - ConnectionVerificationFailure - | _ | { "the connection proof verification failed" }, - - ConsensusStateVerificationFailure - { height: Height } - [ client_error::Error ] - | e | { - format_args!("the consensus proof verification failed (height: {0})", - e.height) - }, - - // TODO: use more specific error source - ClientStateVerificationFailure - { - client_id: ClientId, - } - [ client_error::Error ] - | e | { - format_args!("the client state proof verification failed for client id {0}", - e.client_id) - }, - - ImplementationSpecific - | _ | { "implementation specific error" }, - } + #[derive(Debug, PartialEq, Eq)] + Error { + Ics02Client + [ client_error::Error ] + | _ | { "ics02 client error" }, + + InvalidState + { state: i32 } + | e | { format_args!("connection state is unknown: {}", e.state) }, + + ConnectionExistsAlready + { connection_id: ConnectionId } + | e | { + format_args!("connection exists (was initialized) already: {0}", + e.connection_id) + }, + + ConnectionMismatch + { connection_id: ConnectionId } + | e | { + format_args!("connection end for identifier {0} was never initialized", + e.connection_id) + }, + + 
InvalidConsensusHeight + { + target_height: Height, + currrent_height: Height + } + | e | { + format_args!("consensus height claimed by the client on the other party is too advanced: {0} (host chain current height: {1})", + e.target_height, e.currrent_height) + }, + + StaleConsensusHeight + { + target_height: Height, + oldest_height: Height + } + | e | { + format_args!("consensus height claimed by the client on the other party has been pruned: {0} (host chain oldest height: {1})", + e.target_height, e.oldest_height) + }, + + InvalidIdentifier + [ ValidationError ] + | _ | { "identifier error" }, + + EmptyProtoConnectionEnd + | _ | { "ConnectionEnd domain object could not be constructed out of empty proto object" }, + + EmptyVersions + | _ | { "empty supported versions" }, + + EmptyFeatures + | _ | { "empty supported features" }, + + NoCommonVersion + | _ | { "no common version" }, + + VersionNotSupported + { + version: Version, + } + | e | { format_args!("version \"{}\" not supported", e.version) }, + + InvalidAddress + | _ | { "invalid address" }, + + MissingProofHeight + | _ | { "missing proof height" }, + + MissingConsensusHeight + | _ | { "missing consensus height" }, + + InvalidProof + [ ProofError ] + | _ | { "invalid connection proof" }, + + VerifyConnectionState + [ client_error::Error ] + | _ | { "error verifying connnection state" }, + + Signer + [ SignerError ] + | _ | { "invalid signer" }, + + ConnectionNotFound + { connection_id: ConnectionId } + | e | { + format_args!("no connection was found for the previous connection id provided {0}", + e.connection_id) + }, + + InvalidCounterparty + | _ | { "invalid signer" }, + + ConnectionIdMismatch + { + connection_id: ConnectionId, + counterparty_connection_id: ConnectionId, + } + | e | { + format_args!("counterparty chosen connection id {0} is different than the connection id {1}", + e.connection_id, e.counterparty_connection_id) + }, + + MissingCounterparty + | _ | { "missing counterparty" }, + + + 
MissingCounterpartyPrefix + | _ | { "missing counterparty prefix" }, + + NullClientProof + | _ | { "client proof must be present" }, + + FrozenClient + { client_id: ClientId } + | e | { + format_args!("the client id does not match any client state: {0}", + e.client_id) + }, + + ConnectionVerificationFailure + | _ | { "the connection proof verification failed" }, + + ConsensusStateVerificationFailure + { height: Height } + [ client_error::Error ] + | e | { + format_args!("the consensus proof verification failed (height: {0})", + e.height) + }, + + // TODO: use more specific error source + ClientStateVerificationFailure + { + client_id: ClientId, + } + [ client_error::Error ] + | e | { + format_args!("the client state proof verification failed for client id {0}", + e.client_id) + }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + } } diff --git a/modules/src/core/ics03_connection/events.rs b/modules/src/core/ics03_connection/events.rs index 702747c147..df5f41f617 100644 --- a/modules/src/core/ics03_connection/events.rs +++ b/modules/src/core/ics03_connection/events.rs @@ -1,15 +1,17 @@ //! Types for the IBC events emitted from Tendermint Websocket by the connection module. 
use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; -use tendermint::abci::Event as AbciEvent; - -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::height::Height; -use crate::core::ics03_connection::error::Error; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::events::{IbcEvent, IbcEventType}; -use crate::prelude::*; +use tendermint::abci::{Event as AbciEvent, EventAttribute}; + +use crate::{ + core::{ + ics02_client::{error::Error as Ics02Error, height::Height}, + ics03_connection::error::Error, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::{IbcEvent, IbcEventType}, + prelude::*, +}; /// The content of the `key` field for the attribute containing the connection identifier. const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -19,65 +21,65 @@ const COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY: &str = "counterparty_connection_id"; const COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY: &str = "counterparty_client_id"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.type_str.parse() { - Ok(IbcEventType::OpenInitConnection) => extract_attributes_from_tx(event) - .map(OpenInit::from) - .map(IbcEvent::OpenInitConnection) - .ok(), - Ok(IbcEventType::OpenTryConnection) => extract_attributes_from_tx(event) - .map(OpenTry::from) - .map(IbcEvent::OpenTryConnection) - .ok(), - Ok(IbcEventType::OpenAckConnection) => extract_attributes_from_tx(event) - .map(OpenAck::from) - .map(IbcEvent::OpenAckConnection) - .ok(), - Ok(IbcEventType::OpenConfirmConnection) => extract_attributes_from_tx(event) - .map(OpenConfirm::from) - .map(IbcEvent::OpenConfirmConnection) - .ok(), - _ => None, - } + match event.kind.parse() { + Ok(IbcEventType::OpenInitConnection) => extract_attributes_from_tx(event) + .map(OpenInit::from) + .map(IbcEvent::OpenInitConnection) + .ok(), + Ok(IbcEventType::OpenTryConnection) => extract_attributes_from_tx(event) + .map(OpenTry::from) + 
.map(IbcEvent::OpenTryConnection) + .ok(), + Ok(IbcEventType::OpenAckConnection) => extract_attributes_from_tx(event) + .map(OpenAck::from) + .map(IbcEvent::OpenAckConnection) + .ok(), + Ok(IbcEventType::OpenConfirmConnection) => extract_attributes_from_tx(event) + .map(OpenConfirm::from) + .map(IbcEvent::OpenConfirmConnection) + .ok(), + _ => None, + } } fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); - match key { - HEIGHT_ATTRIBUTE_KEY => { - attr.height = value.parse().map_err(|e| { - Error::ics02_client(Ics02Error::invalid_string_as_height(value.to_string(), e)) - })?; - } - CONN_ID_ATTRIBUTE_KEY => { - attr.connection_id = value.parse().ok(); - } - CLIENT_ID_ATTRIBUTE_KEY => { - attr.client_id = value.parse().map_err(Error::invalid_identifier)?; - } - COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY => { - attr.counterparty_connection_id = value.parse().ok(); - } - COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY => { - attr.counterparty_client_id = value.parse().map_err(Error::invalid_identifier)?; - } - _ => {} - } - } - - Ok(attr) + let mut attr = Attributes::default(); + + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + HEIGHT_ATTRIBUTE_KEY => { + attr.height = value.parse().map_err(|e| { + Error::ics02_client(Ics02Error::invalid_string_as_height(value.to_string(), e)) + })?; + }, + CONN_ID_ATTRIBUTE_KEY => { + attr.connection_id = value.parse().ok(); + }, + CLIENT_ID_ATTRIBUTE_KEY => { + attr.client_id = value.parse().map_err(Error::invalid_identifier)?; + }, + COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY => { + attr.counterparty_connection_id = value.parse().ok(); + }, + COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY => { + attr.counterparty_client_id = value.parse().map_err(Error::invalid_identifier)?; + }, + _ => {}, + } + } + + Ok(attr) } #[derive(Debug, Default, 
Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub connection_id: Option, - pub client_id: ClientId, - pub counterparty_connection_id: Option, - pub counterparty_client_id: ClientId, + pub height: Height, + pub connection_id: Option, + pub client_id: ClientId, + pub counterparty_connection_id: Option, + pub counterparty_client_id: ClientId, } /// Convert attributes to Tendermint ABCI tags @@ -88,237 +90,230 @@ pub struct Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl From for Vec { - fn from(a: Attributes) -> Self { - let mut attributes = vec![]; - let height = Tag { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - }; - attributes.push(height); - if let Some(conn_id) = a.connection_id { - let conn_id = Tag { - key: CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: conn_id.to_string().parse().unwrap(), - }; - attributes.push(conn_id); - } - let client_id = Tag { - key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.client_id.to_string().parse().unwrap(), - }; - attributes.push(client_id); - if let Some(conn_id) = a.counterparty_connection_id { - let conn_id = Tag { - key: COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: conn_id.to_string().parse().unwrap(), - }; - attributes.push(conn_id); - } - let counterparty_client_id = Tag { - key: COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.counterparty_client_id.to_string().parse().unwrap(), - }; - attributes.push(counterparty_client_id); - attributes - } +impl From for Vec { + fn from(a: Attributes) -> Self { + let mut attributes = vec![]; + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + 
attributes.push(height); + if let Some(conn_id) = a.connection_id { + let conn_id = EventAttribute { + key: CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: conn_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(conn_id); + } + let client_id = EventAttribute { + key: CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.client_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(client_id); + if let Some(conn_id) = a.counterparty_connection_id { + let conn_id = EventAttribute { + key: COUNTERPARTY_CONN_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: conn_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(conn_id); + } + let counterparty_client_id = EventAttribute { + key: COUNTERPARTY_CLIENT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.counterparty_client_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(counterparty_client_id); + attributes + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenInit(Attributes); impl OpenInit { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenInit { - fn from(attrs: Attributes) -> Self { - OpenInit(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenInit(attrs) + } } impl From for IbcEvent { - fn from(v: OpenInit) -> Self { - IbcEvent::OpenInitConnection(v) - } + fn from(v: OpenInit) -> Self { + IbcEvent::OpenInitConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenInit) -> Self 
{ - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::OpenInitConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenInit) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenInitConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenTry(Attributes); impl OpenTry { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenTry { - fn from(attrs: Attributes) -> Self { - OpenTry(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenTry(attrs) + } } impl From for IbcEvent { - fn from(v: OpenTry) -> Self { - IbcEvent::OpenTryConnection(v) - } + fn from(v: OpenTry) -> Self { + IbcEvent::OpenTryConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenTry) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::OpenTryConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenTry) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenTryConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenAck(Attributes); impl OpenAck { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut 
self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenAck { - fn from(attrs: Attributes) -> Self { - OpenAck(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenAck(attrs) + } } impl From for IbcEvent { - fn from(v: OpenAck) -> Self { - IbcEvent::OpenAckConnection(v) - } + fn from(v: OpenAck) -> Self { + IbcEvent::OpenAckConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenAck) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::OpenAckConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenAck) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenAckConnection.as_str().to_string(), attributes } + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenConfirm(Attributes); impl OpenConfirm { - pub fn attributes(&self) -> &Attributes { - &self.0 - } - pub fn connection_id(&self) -> Option<&ConnectionId> { - self.0.connection_id.as_ref() - } - pub fn height(&self) -> Height { - self.0.height - } - pub fn set_height(&mut self, height: Height) { - self.0.height = height; - } + pub fn attributes(&self) -> &Attributes { + &self.0 + } + pub fn connection_id(&self) -> Option<&ConnectionId> { + self.0.connection_id.as_ref() + } + pub fn height(&self) -> Height { + self.0.height + } + pub fn set_height(&mut self, height: Height) { + self.0.height = height; + } } impl From for OpenConfirm { - fn from(attrs: Attributes) -> Self { - OpenConfirm(attrs) - } + fn from(attrs: Attributes) -> Self { + OpenConfirm(attrs) + } } impl From for IbcEvent { - fn from(v: OpenConfirm) -> Self { - IbcEvent::OpenConfirmConnection(v) - } + fn from(v: OpenConfirm) -> Self { + 
IbcEvent::OpenConfirmConnection(v) + } } impl From for AbciEvent { - fn from(v: OpenConfirm) -> Self { - let attributes = Vec::::from(v.0); - AbciEvent { - type_str: IbcEventType::OpenConfirmConnection.as_str().to_string(), - attributes, - } - } + fn from(v: OpenConfirm) -> Self { + let attributes = Vec::::from(v.0); + AbciEvent { kind: IbcEventType::OpenConfirmConnection.as_str().to_string(), attributes } + } } #[cfg(test)] mod test { - use super::*; - - #[test] - fn connection_event_to_abci_event() { - let height = Height::new(1, 1); - let attributes = Attributes { - height, - connection_id: Some("test_connection".parse().unwrap()), - client_id: "test_client".parse().unwrap(), - counterparty_connection_id: Some("counterparty_test_conn".parse().unwrap()), - counterparty_client_id: "counterparty_test_client".parse().unwrap(), - }; - let mut abci_events = vec![]; - let open_init = OpenInit::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_init.clone())); - let open_try = OpenTry::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_try.clone())); - let open_ack = OpenAck::from(attributes.clone()); - abci_events.push(AbciEvent::from(open_ack.clone())); - let open_confirm = OpenConfirm::from(attributes); - abci_events.push(AbciEvent::from(open_confirm.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::OpenInitConnection(e) => assert_eq!(e.0, open_init.0), - IbcEvent::OpenTryConnection(e) => assert_eq!(e.0, open_try.0), - IbcEvent::OpenAckConnection(e) => assert_eq!(e.0, open_ack.0), - IbcEvent::OpenConfirmConnection(e) => assert_eq!(e.0, open_confirm.0), - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + use super::*; + + #[test] + fn connection_event_to_abci_event() { + let height = Height::new(1, 1); + let attributes = Attributes { + height, + connection_id: Some("test_connection".parse().unwrap()), + client_id: 
"test_client".parse().unwrap(), + counterparty_connection_id: Some("counterparty_test_conn".parse().unwrap()), + counterparty_client_id: "counterparty_test_client".parse().unwrap(), + }; + let mut abci_events = vec![]; + let open_init = OpenInit::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_init.clone())); + let open_try = OpenTry::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_try.clone())); + let open_ack = OpenAck::from(attributes.clone()); + abci_events.push(AbciEvent::from(open_ack.clone())); + let open_confirm = OpenConfirm::from(attributes); + abci_events.push(AbciEvent::from(open_confirm.clone())); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::OpenInitConnection(e) => assert_eq!(e.0, open_init.0), + IbcEvent::OpenTryConnection(e) => assert_eq!(e.0, open_try.0), + IbcEvent::OpenAckConnection(e) => assert_eq!(e.0, open_ack.0), + IbcEvent::OpenConfirmConnection(e) => assert_eq!(e.0, open_confirm.0), + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } } diff --git a/modules/src/core/ics03_connection/handler.rs b/modules/src/core/ics03_connection/handler.rs index b096c32e1d..85e19d0262 100644 --- a/modules/src/core/ics03_connection/handler.rs +++ b/modules/src/core/ics03_connection/handler.rs @@ -1,11 +1,14 @@ //! This module implements the processing logic for ICS3 (connection open handshake) messages. 
-use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::msgs::ConnectionMsg; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::handler::HandlerOutput; +use crate::{ + core::{ + ics03_connection::{connection::ConnectionEnd, error::Error, msgs::ConnectionMsg}, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + handler::HandlerOutput, +}; +use core::fmt::Debug; pub mod conn_open_ack; pub mod conn_open_confirm; @@ -17,41 +20,38 @@ pub mod verify; /// Defines the possible states of a connection identifier in a `ConnectionResult`. #[derive(Clone, Debug)] pub enum ConnectionIdState { - /// Specifies that the handler allocated a new connection identifier. This happens during the - /// processing of either the `MsgConnectionOpenInit` or `MsgConnectionOpenTry` message. - Generated, + /// Specifies that the handler allocated a new connection identifier. This happens during the + /// processing of either the `MsgConnectionOpenInit` or `MsgConnectionOpenTry` message. + Generated, - /// Specifies that the handler reused a previously-allocated connection identifier. - Reused, + /// Specifies that the handler reused a previously-allocated connection identifier. + Reused, } #[derive(Clone, Debug)] pub struct ConnectionResult { - /// The identifier for the connection which the handler processed. Typically this represents the - /// newly-generated connection id (e.g., when processing `MsgConnectionOpenInit`) or - /// an existing connection id (e.g., for `MsgConnectionOpenAck`). - pub connection_id: ConnectionId, + /// The identifier for the connection which the handler processed. Typically this represents + /// the newly-generated connection id (e.g., when processing `MsgConnectionOpenInit`) or + /// an existing connection id (e.g., for `MsgConnectionOpenAck`). 
+ pub connection_id: ConnectionId, - /// The state of the connection identifier (whether it was newly-generated or not). - pub connection_id_state: ConnectionIdState, + /// The state of the connection identifier (whether it was newly-generated or not). + pub connection_id_state: ConnectionIdState, - /// The connection end, which the handler produced as a result of processing the message. - pub connection_end: ConnectionEnd, + /// The connection end, which the handler produced as a result of processing the message. + pub connection_end: ConnectionEnd, } /// General entry point for processing any type of message related to the ICS3 connection open /// handshake protocol. -pub fn dispatch( - ctx: &Ctx, - msg: ConnectionMsg, -) -> Result, Error> -where - Ctx: ConnectionReader, -{ - match msg { - ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), - ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process(ctx, *msg), - ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process(ctx, *msg), - ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process(ctx, msg), - } +pub fn dispatch( + ctx: &Ctx, + msg: ConnectionMsg, +) -> Result, Error> { + match msg { + ConnectionMsg::ConnectionOpenInit(msg) => conn_open_init::process(ctx, msg), + ConnectionMsg::ConnectionOpenTry(msg) => conn_open_try::process::<_>(ctx, *msg), + ConnectionMsg::ConnectionOpenAck(msg) => conn_open_ack::process::<_>(ctx, *msg), + ConnectionMsg::ConnectionOpenConfirm(msg) => conn_open_confirm::process::<_>(ctx, msg), + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_ack.rs b/modules/src/core/ics03_connection/handler/conn_open_ack.rs index 086a8a9d12..2cf0fdc6bf 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_ack.rs @@ -1,233 +1,283 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenAck`. 
-use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_proofs, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{ + verify::{ + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, + }, + ConnectionIdState, ConnectionResult, + }, + msgs::conn_open_ack::MsgConnectionOpenAck, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ConnectionReader, - msg: MsgConnectionOpenAck, + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenAck, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Check the client's (consensus state) proof height. - check_client_consensus_height(ctx, msg.consensus_height())?; - - // Validate the connection end. - let mut conn_end = ctx.connection_end(&msg.connection_id)?; - // A connection end must be Init or TryOpen; otherwise we return an error. - let state_is_consistent = conn_end.state_matches(&State::Init) - && conn_end.versions().contains(&msg.version) - || conn_end.state_matches(&State::TryOpen) - && conn_end.versions().get(0).eq(&Some(&msg.version)); - - if !state_is_consistent { - // Old connection end is in incorrect state, propagate the error. 
- return Err(Error::connection_mismatch(msg.connection_id)); - } - - // Set the connection ID of the counterparty - let prev_counterparty = conn_end.counterparty(); - let counterparty = Counterparty::new( - prev_counterparty.client_id().clone(), - Some(msg.counterparty_connection_id.clone()), - prev_counterparty.prefix().clone(), - ); - conn_end.set_state(State::Open); - conn_end.set_version(msg.version.clone()); - conn_end.set_counterparty(counterparty); - - // Proof verification. - let expected_conn = { - // The counterparty is the local chain. - let counterparty = Counterparty::new( - conn_end.client_id().clone(), // The local client identifier. - Some(msg.connection_id.clone()), // This chain's connection id as known on counterparty. - ctx.commitment_prefix(), // Local commitment prefix. - ); - - ConnectionEnd::new( - State::TryOpen, - conn_end.counterparty().client_id().clone(), - counterparty, - vec![msg.version.clone()], - conn_end.delay_period(), - ) - }; - - // 2. Pass the details to the verification function. - verify_proofs( - ctx, - msg.client_state.clone(), - msg.proofs.height(), - &conn_end, - &expected_conn, - &msg.proofs, - )?; - - output.log("success: connection verification passed"); - - let result = ConnectionResult { - connection_id: msg.connection_id, - connection_id_state: ConnectionIdState::Reused, - connection_end: conn_end, - }; - - let event_attributes = Attributes { - connection_id: Some(result.connection_id.clone()), - height: ctx.host_current_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Check the client's (consensus state) proof height if it consensus proof is provided + if msg.proofs.consensus_proof().is_some() { + check_client_consensus_height(ctx, msg.consensus_height())?; + } + + // Validate the connection end. 
+ let mut conn_end = ctx.connection_end(&msg.connection_id)?; + // A connection end must be Init or TryOpen; otherwise we return an error. + let state_is_consistent = conn_end.state_matches(&State::Init) && + conn_end.versions().contains(&msg.version) || + conn_end.state_matches(&State::TryOpen) && + conn_end.versions().get(0).eq(&Some(&msg.version)); + + if !state_is_consistent { + // Old connection end is in incorrect state, propagate the error. + return Err(Error::connection_mismatch(msg.connection_id)) + } + + // Set the connection ID of the counterparty + let prev_counterparty = conn_end.counterparty(); + let counterparty = Counterparty::new( + prev_counterparty.client_id().clone(), + Some(msg.counterparty_connection_id.clone()), + prev_counterparty.prefix().clone(), + ); + conn_end.set_state(State::Open); + conn_end.set_version(msg.version.clone()); + conn_end.set_counterparty(counterparty); + + // Proof verification. + let expected_conn = { + // The counterparty is the local chain. + let counterparty = Counterparty::new( + conn_end.client_id().clone(), // The local client identifier. + Some(msg.connection_id.clone()), /* This chain's connection id as known on + * counterparty. */ + ctx.commitment_prefix(), // Local commitment prefix. 
+ ); + + ConnectionEnd::new( + State::TryOpen, + conn_end.counterparty().client_id().clone(), + counterparty, + vec![msg.version.clone()], + conn_end.delay_period(), + ) + }; + + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state).map_err(Error::ics02_client)?; + + verify_connection_proof::( + ctx, + msg.proofs.height(), + &conn_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::( + ctx, + msg.proofs.height(), + &conn_end, + client_state, + msg.proofs.height(), + client_proof, + )?; + + verify_consensus_proof::(ctx, msg.proofs.height(), &conn_end, &consensus_proof)?; + + output.log("success: connection verification passed"); + + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: msg.connection_id, + connection_id_state: ConnectionIdState::Reused, + connection_end: conn_end, + }; + + output.emit(IbcEvent::OpenAckConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; - use crate::core::ics03_connection::context::ConnectionReader; - use 
crate::core::ics03_connection::error; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_ack::test_util::get_dummy_raw_msg_conn_open_ack; - use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics23_commitment::commitment::CommitmentPrefix; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn conn_open_ack_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - match_error: Box, - } - - let msg_ack = - MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack(10, 10)).unwrap(); - let conn_id = msg_ack.connection_id.clone(); - let counterparty_conn_id = msg_ack.counterparty_connection_id.clone(); - - // Client parameters -- identifier and correct height (matching the proof height) - let client_id = ClientId::from_str("mock_clientid").unwrap(); - let proof_height = msg_ack.proofs.height(); - - // Parametrize the host chain to have a height at least as recent as the - // the height of the proofs in the Ack msg. - let latest_height = proof_height.increment(); - let max_history_size = 5; - let default_context = MockContext::new( - ChainId::new("mockgaia".to_string(), latest_height.revision_number), - HostType::Mock, - max_history_size, - latest_height, - ); - - // A connection end that will exercise the successful path. 
- let default_conn_end = ConnectionEnd::new( - State::Init, - client_id.clone(), - Counterparty::new( - client_id.clone(), - Some(msg_ack.counterparty_connection_id.clone()), - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ), - vec![msg_ack.version.clone()], - ZERO_DURATION, - ); - - // A connection end with incorrect state `Open`; will be part of the context. - let mut conn_end_open = default_conn_end.clone(); - conn_end_open.set_state(State::Open); // incorrect field - - let tests: Vec = vec![ - Test { - name: "Successful processing of an Ack message".to_string(), - ctx: default_context - .clone() - .with_client(&client_id, proof_height) - .with_connection(conn_id.clone(), default_conn_end), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), - want_pass: true, - match_error: Box::new(|_| panic!("should not have error")), - }, - Test { - name: "Processing fails because the connection does not exist in the context" - .to_string(), - ctx: default_context.clone(), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), - want_pass: false, - match_error: { - let connection_id = conn_id.clone(); - Box::new(move |e| match e.detail() { - error::ErrorDetail::ConnectionNotFound(e) => { - assert_eq!(e.connection_id, connection_id) - } - _ => { - panic!("Expected ConnectionNotFound error"); - } - }) - }, - }, - Test { - name: "Processing fails due to connections mismatch (incorrect 'open' state)" - .to_string(), - ctx: default_context - .with_client(&client_id, proof_height) - .with_connection(conn_id.clone(), conn_end_open), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), - want_pass: false, - match_error: { - let connection_id = conn_id; - Box::new(move |e| match e.detail() { - error::ErrorDetail::ConnectionMismatch(e) => { - assert_eq!(e.connection_id, connection_id); - } - _ => { - panic!("Expected ConnectionMismatch error"); - } - }) - }, - }, - /* - Test { - name: "Processing fails due to 
MissingLocalConsensusState".to_string(), - ctx: MockContext::default() - .with_client(&client_id, proof_height) - .with_connection(conn_id, default_conn_end), - msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), - want_pass: false, - error_kind: Some(Kind::MissingLocalConsensusState) - }, - */ - ]; - - for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_ack::{ + test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck, + }, + ConnectionMsg, + }, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ChainId, ClientId}, + }, + events::IbcEvent, + mock::{ + context::{MockClientTypes, MockContext}, + host::MockHostType, + }, + timestamp::ZERO_DURATION, + }; + + #[test] + fn conn_open_ack_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + match_error: Box, + } + + let msg_ack = + MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack(10, 10)).unwrap(); + let conn_id = msg_ack.connection_id.clone(); + let counterparty_conn_id = msg_ack.counterparty_connection_id.clone(); + + // Client parameters -- identifier and correct height (matching the proof height) + let client_id = ClientId::from_str("mock_clientid").unwrap(); + let proof_height = msg_ack.proofs.height(); + + // Parametrize the host chain to have a height at least as recent as the + // the height of the proofs in the Ack msg. 
+ let latest_height = proof_height.increment(); + let max_history_size = 5; + let default_context = MockContext::new( + ChainId::new("mockgaia".to_string(), latest_height.revision_number), + MockHostType::Mock, + max_history_size, + latest_height, + ); + + // A connection end that will exercise the successful path. + let default_conn_end = ConnectionEnd::new( + State::Init, + client_id.clone(), + Counterparty::new( + client_id.clone(), + Some(msg_ack.counterparty_connection_id.clone()), + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ), + vec![msg_ack.version.clone()], + ZERO_DURATION, + ); + + // A connection end with incorrect state `Open`; will be part of the context. + let mut conn_end_open = default_conn_end.clone(); + conn_end_open.set_state(State::Open); // incorrect field + + let tests: Vec = vec![ + Test { + name: "Successful processing of an Ack message".to_string(), + ctx: default_context + .clone() + .with_client(&client_id, proof_height) + .with_connection(conn_id.clone(), default_conn_end), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), + want_pass: true, + match_error: Box::new(|_| panic!("should not have error")), + }, + Test { + name: "Processing fails because the connection does not exist in the context" + .to_string(), + ctx: default_context.clone(), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack.clone())), + want_pass: false, + match_error: { + let connection_id = conn_id.clone(); + Box::new(move |e| match e.detail() { + error::ErrorDetail::ConnectionNotFound(e) => { + assert_eq!(e.connection_id, connection_id) + }, + _ => { + panic!("Expected ConnectionNotFound error"); + }, + }) + }, + }, + Test { + name: "Processing fails due to connections mismatch (incorrect 'open' state)" + .to_string(), + ctx: default_context + .with_client(&client_id, proof_height) + .with_connection(conn_id.clone(), conn_end_open), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), + want_pass: false, + match_error: { + 
let connection_id = conn_id; + Box::new(move |e| match e.detail() { + error::ErrorDetail::ConnectionMismatch(e) => { + assert_eq!(e.connection_id, connection_id); + }, + _ => { + panic!("Expected ConnectionMismatch error"); + }, + }) + }, + }, + /* + Test { + name: "Processing fails due to MissingLocalConsensusState".to_string(), + ctx: MockContext::default() + .with_client(&client_id, proof_height) + .with_connection(conn_id, default_conn_end), + msg: ConnectionMsg::ConnectionOpenAck(Box::new(msg_ack)), + want_pass: false, + error_kind: Some(Kind::MissingLocalConsensusState) + }, + */ + ]; + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -235,37 +285,37 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. + assert!(!proto_output.events.is_empty()); // Some events must exist. - // The object in the output is a ConnectionEnd, should have OPEN state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Open); + // The object in the output is a ConnectionEnd, should have OPEN state. 
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Open); - // assert that counterparty connection id is correct - assert_eq!( - res.connection_end.counterparty().connection_id, - Some(counterparty_conn_id.clone()) - ); + // assert that counterparty connection id is correct + assert_eq!( + res.connection_end.counterparty().connection_id, + Some(counterparty_conn_id.clone()) + ); - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenAckConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_ack: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenAckConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_ack: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); - // Verify that the error kind matches - (test.match_error)(e); - } - } - } - } + // Verify that the error kind matches + (test.match_error)(e); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs index 3f32cc3fad..9988bb6b50 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_confirm.rs @@ -1,160 +1,178 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenConfirm`. 
-use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::verify_proofs; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ConnectionReader, - msg: MsgConnectionOpenConfirm, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_connection_proof, ConnectionIdState, ConnectionResult}, + msgs::conn_open_confirm::MsgConnectionOpenConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenConfirm, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Validate the connection end. - let mut conn_end = ctx.connection_end(&msg.connection_id)?; - // A connection end must be in TryOpen state; otherwise return error. - if !conn_end.state_matches(&State::TryOpen) { - // Old connection end is in incorrect state, propagate the error. - return Err(Error::connection_mismatch(msg.connection_id)); - } - - // Verify proofs. Assemble the connection end as we expect to find it on the counterparty. - let expected_conn = ConnectionEnd::new( - State::Open, - conn_end.counterparty().client_id().clone(), - Counterparty::new( - // The counterparty is the local chain. - conn_end.client_id().clone(), // The local client identifier. - Some(msg.connection_id.clone()), // Local connection id. 
- ctx.commitment_prefix(), // Local commitment prefix. - ), - conn_end.versions().to_vec(), - conn_end.delay_period(), - ); - - // 2. Pass the details to the verification function. - verify_proofs( - ctx, - None, - msg.proofs.height(), - &conn_end, - &expected_conn, - &msg.proofs, - )?; - - output.log("success: connection verification passed"); - - // Transition our own end of the connection to state OPEN. - conn_end.set_state(State::Open); - - let result = ConnectionResult { - connection_id: msg.connection_id, - connection_id_state: ConnectionIdState::Reused, - connection_end: conn_end, - }; - - let event_attributes = Attributes { - connection_id: Some(result.connection_id.clone()), - height: ctx.host_current_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Validate the connection end. + let mut conn_end = ctx.connection_end(&msg.connection_id)?; + // A connection end must be in TryOpen state; otherwise return error. + if !conn_end.state_matches(&State::TryOpen) { + // Old connection end is in incorrect state, propagate the error. + return Err(Error::connection_mismatch(msg.connection_id)) + } + + // Verify proofs. Assemble the connection end as we expect to find it on the counterparty. + let expected_conn = ConnectionEnd::new( + State::Open, + conn_end.counterparty().client_id().clone(), + Counterparty::new( + // The counterparty is the local chain. + conn_end.client_id().clone(), // The local client identifier. + Some(msg.connection_id.clone()), // Local connection id. + ctx.commitment_prefix(), // Local commitment prefix. + ), + conn_end.versions().to_vec(), + conn_end.delay_period(), + ); + + // 2. Pass the details to the verification function. 
+ verify_connection_proof::( + ctx, + msg.proofs.height(), + &conn_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + output.log("success: connection verification passed"); + + // Transition our own end of the connection to state OPEN. + conn_end.set_state(State::Open); + + let event_attributes = Attributes { + connection_id: Some(msg.connection_id.clone()), + height: ctx.host_height(), + client_id: conn_end.client_id().clone(), + counterparty_connection_id: conn_end.counterparty().connection_id.clone(), + counterparty_client_id: conn_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: msg.connection_id, + connection_id_state: ConnectionIdState::Reused, + connection_end: conn_end, + }; + + output.emit(IbcEvent::OpenConfirmConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_confirm::test_util::get_dummy_raw_msg_conn_open_confirm; - use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics23_commitment::commitment::CommitmentPrefix; - use crate::core::ics24_host::identifier::ClientId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - use crate::Height; - - #[test] - fn conn_open_confirm_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - } - - let client_id = ClientId::from_str("mock_clientid").unwrap(); - let msg_confirm = - 
MsgConnectionOpenConfirm::try_from(get_dummy_raw_msg_conn_open_confirm()).unwrap(); - let counterparty = Counterparty::new( - client_id.clone(), - Some(msg_confirm.connection_id.clone()), - CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), - ); - - let context = MockContext::default(); - - let incorrect_conn_end_state = ConnectionEnd::new( - State::Init, - client_id.clone(), - counterparty, - context.get_compatible_versions(), - ZERO_DURATION, - ); - - let mut correct_conn_end = incorrect_conn_end_state.clone(); - correct_conn_end.set_state(State::TryOpen); - - let tests: Vec = vec![ - Test { - name: "Processing fails due to missing connection in context".to_string(), - ctx: context.clone(), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), - want_pass: false, - }, - Test { - name: "Processing fails due to connections mismatch (incorrect state)".to_string(), - ctx: context - .clone() - .with_client(&client_id, Height::new(0, 10)) - .with_connection(msg_confirm.connection_id.clone(), incorrect_conn_end_state), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), - want_pass: false, - }, - Test { - name: "Processing successful".to_string(), - ctx: context - .with_client(&client_id, Height::new(0, 10)) - .with_connection(msg_confirm.connection_id.clone(), correct_conn_end), - msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + context::ConnectionReader, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_confirm::{ + test_util::get_dummy_raw_msg_conn_open_confirm, MsgConnectionOpenConfirm, + }, + ConnectionMsg, + }, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::ClientId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + Height, + }; + + #[test] + fn conn_open_confirm_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + } + + let client_id = ClientId::from_str("mock_clientid").unwrap(); + let msg_confirm = + MsgConnectionOpenConfirm::try_from(get_dummy_raw_msg_conn_open_confirm()).unwrap(); + let counterparty = Counterparty::new( + client_id.clone(), + Some(msg_confirm.connection_id.clone()), + CommitmentPrefix::try_from(b"ibc".to_vec()).unwrap(), + ); + + let context = MockContext::default(); + + let incorrect_conn_end_state = ConnectionEnd::new( + State::Init, + client_id.clone(), + counterparty, + context.get_compatible_versions(), + ZERO_DURATION, + ); + + let mut correct_conn_end = incorrect_conn_end_state.clone(); + correct_conn_end.set_state(State::TryOpen); + + let tests: Vec = vec![ + Test { + name: "Processing fails due to missing connection in context".to_string(), + ctx: context.clone(), + msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), + want_pass: false, + }, + Test { + name: "Processing fails due to connections mismatch (incorrect state)".to_string(), + ctx: context + .clone() + .with_client(&client_id, Height::new(0, 10)) + .with_connection(msg_confirm.connection_id.clone(), incorrect_conn_end_state), + msg: 
ConnectionMsg::ConnectionOpenConfirm(msg_confirm.clone()), + want_pass: false, + }, + Test { + name: "Processing successful".to_string(), + ctx: context + .with_client(&client_id, Height::new(0, 10)) + .with_connection(msg_confirm.connection_id.clone(), correct_conn_end), + msg: ConnectionMsg::ConnectionOpenConfirm(msg_confirm), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_confirm: test passed but was supposed to fail for: {}, \nparams {:?} {:?}", test.name, @@ -162,28 +180,28 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have OPEN state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenConfirmConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_confirm: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have OPEN state. 
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenConfirmConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_confirm: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_init.rs b/modules/src/core/ics03_connection/handler/conn_open_init.rs index 4cfd3d59ae..01de05a2c8 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_init.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_init.rs @@ -1,188 +1,201 @@ //! Protocol logic specific to ICS3 messages of type `MsgConnectionOpenInit`. -use crate::core::ics03_connection::connection::{ConnectionEnd, State}; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ConnectionReader, - msg: MsgConnectionOpenInit, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, State}, + error::Error, + events::Attributes, + handler::{ConnectionIdState, ConnectionResult}, + msgs::conn_open_init::MsgConnectionOpenInit, + }, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenInit, ) -> 
HandlerResult { - let mut output = HandlerOutput::builder(); - - // An IBC client running on the local (host) chain should exist. - ctx.client_state(&msg.client_id)?; - - let versions = match msg.version { - Some(version) => { - if ctx.get_compatible_versions().contains(&version) { - Ok(vec![version]) - } else { - Err(Error::version_not_supported(version)) - } - } - None => Ok(ctx.get_compatible_versions()), - }?; - - let new_connection_end = ConnectionEnd::new( - State::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - versions, - msg.delay_period, - ); - - // Construct the identifier for the new connection. - let id_counter = ctx.connection_counter()?; - let conn_id = ConnectionId::new(id_counter); - - output.log(format!( - "success: generated new connection identifier: {}", - conn_id - )); - - let result = ConnectionResult { - connection_id: conn_id.clone(), - connection_id_state: ConnectionIdState::Generated, - connection_end: new_connection_end, - }; - - let event_attributes = Attributes { - connection_id: Some(conn_id), - height: ctx.host_current_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // An IBC client running on the local (host) chain should exist. + ctx.client_state(&msg.client_id).map_err(Error::ics02_client)?; + + let versions = match msg.version { + Some(version) => + if ctx.get_compatible_versions().contains(&version) { + Ok(vec![version]) + } else { + Err(Error::version_not_supported(version)) + }, + None => Ok(ctx.get_compatible_versions()), + }?; + + let new_connection_end = ConnectionEnd::new( + State::Init, + msg.client_id.clone(), + msg.counterparty.clone(), + versions, + msg.delay_period, + ); + + // Construct the identifier for the new connection. 
+ let id_counter = ctx.connection_counter()?; + let conn_id = ConnectionId::new(id_counter); + + output.log(format!("success: generated new connection identifier: {}", conn_id)); + + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: conn_id, + connection_id_state: ConnectionIdState::Generated, + connection_end: new_connection_end, + }; + + output.emit(IbcEvent::OpenInitConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics03_connection::connection::State; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics03_connection::version::Version; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::Height; - - use ibc_proto::ibc::core::connection::v1::Version as RawVersion; - - #[test] - fn conn_open_init_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - expected_versions: Vec, - want_pass: bool, - } - - let msg_conn_init_default = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - let msg_conn_init_no_version = MsgConnectionOpenInit { - version: None, - ..msg_conn_init_default.clone() - }; - let msg_conn_init_bad_version = MsgConnectionOpenInit { - version: 
Version::try_from(RawVersion { - identifier: "random identifier 424242".to_string(), - features: vec![], - }) - .unwrap() - .into(), - ..msg_conn_init_default.clone() - }; - let default_context = MockContext::default(); - let good_context = default_context - .clone() - .with_client(&msg_conn_init_default.client_id, Height::new(0, 10)); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no client exists in the context".to_string(), - ctx: default_context, - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), - expected_versions: vec![msg_conn_init_default.version.clone().unwrap()], - want_pass: false, - }, - Test { - name: "Incompatible version in MsgConnectionOpenInit msg".to_string(), - ctx: good_context.clone(), - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_bad_version), - expected_versions: vec![], - want_pass: false, - }, - Test { - name: "No version in MsgConnectionOpenInit msg".to_string(), - ctx: good_context.clone(), - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_no_version), - expected_versions: good_context.get_compatible_versions(), - want_pass: true, - }, - Test { - name: "Good parameters".to_string(), - ctx: good_context, - msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), - expected_versions: vec![msg_conn_init_default.version.unwrap()], - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = dispatch(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. 
- let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::Init); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenInitConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); - } - - assert_eq!(res.connection_end.versions(), test.expected_versions); - - // This needs to be last - assert!( + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::State, + context::ConnectionReader, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + ConnectionMsg, + }, + version::Version, + }, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + Height, + }; + + use ibc_proto::ibc::core::connection::v1::Version as RawVersion; + + #[test] + fn conn_open_init_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + expected_versions: Vec, + want_pass: bool, + } + + let msg_conn_init_default = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + let msg_conn_init_no_version = + MsgConnectionOpenInit { version: None, ..msg_conn_init_default.clone() }; + let msg_conn_init_bad_version = MsgConnectionOpenInit { + version: Version::try_from(RawVersion { + identifier: "random identifier 424242".to_string(), + features: vec![], + }) + .unwrap() + .into(), + ..msg_conn_init_default.clone() + }; + let default_context = MockContext::default(); + let good_context = default_context + .clone() + .with_client(&msg_conn_init_default.client_id, Height::new(0, 10)); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no client exists in the context".to_string(), + ctx: default_context, + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), + expected_versions: 
vec![msg_conn_init_default.version.clone().unwrap()], + want_pass: false, + }, + Test { + name: "Incompatible version in MsgConnectionOpenInit msg".to_string(), + ctx: good_context.clone(), + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_bad_version), + expected_versions: vec![], + want_pass: false, + }, + Test { + name: "No version in MsgConnectionOpenInit msg".to_string(), + ctx: good_context.clone(), + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_no_version), + expected_versions: good_context.get_compatible_versions(), + want_pass: true, + }, + Test { + name: "Good parameters".to_string(), + ctx: good_context, + msg: ConnectionMsg::ConnectionOpenInit(msg_conn_init_default.clone()), + expected_versions: vec![msg_conn_init_default.version.unwrap()], + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. 
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::Init); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenInitConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + + assert_eq!(res.connection_end.versions(), test.expected_versions); + + // This needs to be last + assert!( test.want_pass, "conn_open_init: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, test.msg.clone(), test.ctx.clone() ); - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/conn_open_try.rs b/modules/src/core/ics03_connection/handler/conn_open_try.rs index b9b2132029..172d1f80a9 100644 --- a/modules/src/core/ics03_connection/handler/conn_open_try.rs +++ b/modules/src/core/ics03_connection/handler/conn_open_try.rs @@ -1,194 +1,211 @@ //! Protocol logic specific to processing ICS3 messages of type `MsgConnectionOpenTry`. 
-use crate::core::ics03_connection::connection::{ConnectionEnd, Counterparty, State}; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::events::Attributes; -use crate::core::ics03_connection::handler::verify::{ - check_client_consensus_height, verify_proofs, +use crate::{ + core::{ + ics03_connection::{ + connection::{ConnectionEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{ + verify::{ + check_client_consensus_height, verify_client_proof, verify_connection_proof, + verify_consensus_proof, + }, + ConnectionIdState, ConnectionResult, + }, + msgs::conn_open_try::MsgConnectionOpenTry, + }, + ics24_host::identifier::ConnectionId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics03_connection::handler::{ConnectionIdState, ConnectionResult}; -use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ConnectionReader, - msg: MsgConnectionOpenTry, + +pub(crate) fn process( + ctx: &Ctx, + msg: MsgConnectionOpenTry, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Check that consensus height (for client proof) in message is not too advanced nor too old. - check_client_consensus_height(ctx, msg.consensus_height())?; - - // Unwrap the old connection end (if any) and its identifier. - let (mut new_connection_end, conn_id) = match &msg.previous_connection_id { - // A connection with this id should already exist. Search & validate. - Some(prev_id) => { - let old_connection_end = ctx.connection_end(prev_id)?; - - // Validate that existing connection end matches with the one we're trying to establish. 
- if old_connection_end.state_matches(&State::Init) - && old_connection_end.counterparty_matches(&msg.counterparty) - && old_connection_end.client_id_matches(&msg.client_id) - && old_connection_end.delay_period() == msg.delay_period - { - // A ConnectionEnd already exists and all validation passed. - output.log(format!( - "success: `previous_connection_id` {} validation passed", - prev_id - )); - Ok((old_connection_end, prev_id.clone())) - } else { - // A ConnectionEnd already exists and validation failed. - Err(Error::connection_mismatch(prev_id.clone())) - } - } - // No prev. connection id was supplied, create a new connection end and conn id. - None => { - // Build a new connection end as well as an identifier. - let conn_end = ConnectionEnd::new( - State::Init, - msg.client_id.clone(), - msg.counterparty.clone(), - msg.counterparty_versions.clone(), - msg.delay_period, - ); - let id_counter = ctx.connection_counter()?; - let conn_id = ConnectionId::new(id_counter); - - output.log(format!( - "success: new connection end and identifier {} generated", - conn_id - )); - Ok((conn_end, conn_id)) - } - }?; - - // Proof verification in two steps: - // 1. Setup: build the ConnectionEnd as we expect to find it on the other party. - let expected_conn = ConnectionEnd::new( - State::Init, - msg.counterparty.client_id().clone(), - Counterparty::new(msg.client_id.clone(), None, ctx.commitment_prefix()), - msg.counterparty_versions.clone(), - msg.delay_period, - ); - - // 2. Pass the details to the verification function. - verify_proofs( - ctx, - msg.client_state.clone(), - msg.proofs.height(), - &new_connection_end, - &expected_conn, - &msg.proofs, - )?; - - // Transition the connection end to the new state & pick a version. - new_connection_end.set_state(State::TryOpen); - - // Pick the version. 
- new_connection_end.set_version(ctx.pick_version( - ctx.get_compatible_versions(), - msg.counterparty_versions.clone(), - )?); - - assert_eq!(new_connection_end.versions().len(), 1); - - output.log("success: connection verification passed"); - - let result = ConnectionResult { - connection_id: conn_id.clone(), - connection_id_state: if matches!(msg.previous_connection_id, None) { - ConnectionIdState::Generated - } else { - ConnectionIdState::Reused - }, - connection_end: new_connection_end, - }; - - let event_attributes = Attributes { - connection_id: Some(conn_id), - height: ctx.host_current_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Check that consensus height if provided (for client proof) in message is not too advanced nor + // too old. + if msg.proofs.consensus_proof().is_some() { + check_client_consensus_height(ctx, msg.consensus_height())?; + } + + // Unwrap the old connection end (if any) and its identifier. + let (mut new_connection_end, conn_id) = { + // Build a new connection end as well as an identifier. + let conn_end = ConnectionEnd::new( + State::Init, + msg.client_id.clone(), + msg.counterparty.clone(), + msg.counterparty_versions.clone(), + msg.delay_period, + ); + let id_counter = ctx.connection_counter()?; + let conn_id = ConnectionId::new(id_counter); + + output.log(format!("success: new connection end and identifier {} generated", conn_id)); + (conn_end, conn_id) + }; + + // Proof verification in two steps: + // 1. Setup: build the ConnectionEnd as we expect to find it on the other party. 
+ let expected_conn = ConnectionEnd::new( + State::Init, + msg.counterparty.client_id().clone(), + Counterparty::new(msg.client_id.clone(), None, ctx.commitment_prefix()), + msg.counterparty_versions.clone(), + msg.delay_period, + ); + + let client_state = msg.client_state.ok_or_else(|| { + Error::implementation_specific("client state is required in connOpenTry".into()) + })?; + + let client_proof = msg.proofs.client_proof().as_ref().ok_or_else(|| { + Error::implementation_specific("client proof is required in connOpenTry".into()) + })?; + + let consensus_proof = msg.proofs.consensus_proof().ok_or_else(|| { + Error::implementation_specific("consensus proof is required in connOpenTry".into()) + })?; + + ctx.validate_self_client(&client_state).map_err(Error::ics02_client)?; + + verify_connection_proof::<_>( + ctx, + msg.proofs.height(), + &new_connection_end, + &expected_conn, + msg.proofs.height(), + msg.proofs.object_proof(), + )?; + + verify_client_proof::<_>( + ctx, + msg.proofs.height(), + &new_connection_end, + client_state, + msg.proofs.height(), + client_proof, + )?; + + verify_consensus_proof::<_>(ctx, msg.proofs.height(), &new_connection_end, &consensus_proof)?; + + // Transition the connection end to the new state & pick a version. + new_connection_end.set_state(State::TryOpen); + + // Pick the version. 
+ new_connection_end.set_version( + ctx.pick_version(ctx.get_compatible_versions(), msg.counterparty_versions.clone())?, + ); + + assert_eq!(new_connection_end.versions().len(), 1); + + output.log("success: connection verification passed"); + + let event_attributes = Attributes { + connection_id: Some(conn_id.clone()), + height: ctx.host_height(), + client_id: new_connection_end.client_id().clone(), + counterparty_connection_id: new_connection_end.counterparty().connection_id.clone(), + counterparty_client_id: new_connection_end.counterparty().client_id().clone(), + }; + + let result = ConnectionResult { + connection_id: conn_id, + connection_id_state: ConnectionIdState::Generated, + connection_end: new_connection_end, + }; + + output.emit(IbcEvent::OpenTryConnection(event_attributes.into())); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics03_connection::connection::State; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::handler::{dispatch, ConnectionResult}; - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::msgs::ConnectionMsg; - use crate::core::ics24_host::identifier::ChainId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::Height; - - #[test] - fn conn_open_try_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ConnectionMsg, - want_pass: bool, - } - - let host_chain_height = Height::new(0, 35); - let max_history_size = 5; - let context = MockContext::new( - ChainId::new("mockgaia".to_string(), 0), - HostType::Mock, - max_history_size, - host_chain_height, - ); - let client_consensus_state_height = 10; - - let msg_conn_try = 
MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.revision_height, - )) - .unwrap(); - - // The proof targets a height that does not exist (i.e., too advanced) on destination chain. - let msg_height_advanced = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.increment().revision_height, - )) - .unwrap(); - let pruned_height = host_chain_height - .sub(max_history_size as u64 + 1) - .unwrap() - .revision_height; - // The consensus proof targets a missing height (pruned) on destination chain. - let msg_height_old = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - pruned_height, - )) - .unwrap(); - - // The proofs in this message are created at a height which the client on destination chain does not have. - let msg_proof_height_missing = - MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height - 1, - host_chain_height.revision_height, - )) - .unwrap(); - - let tests: Vec = vec![ + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::State, + handler::{dispatch, ConnectionResult}, + msgs::{ + conn_open_try::{ + test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry, + }, + ConnectionMsg, + }, + }, + ics24_host::identifier::ChainId, + }, + events::IbcEvent, + mock::{ + context::{MockClientTypes, MockContext}, + host::MockHostType, + }, + Height, + }; + + #[test] + fn conn_open_try_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ConnectionMsg>, + want_pass: bool, + } + + let host_chain_height = Height::new(0, 35); + let max_history_size = 5; + let context = MockContext::new( + ChainId::new("mockgaia".to_string(), 0), + MockHostType::Mock, + max_history_size, + host_chain_height, + ); + let 
client_consensus_state_height = 10; + + let msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.revision_height, + )) + .unwrap(); + + // The proof targets a height that does not exist (i.e., too advanced) on destination chain. + let msg_height_advanced = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.increment().revision_height, + )) + .unwrap(); + let pruned_height = + host_chain_height.sub(max_history_size as u64 + 1).unwrap().revision_height; + // The consensus proof targets a missing height (pruned) on destination chain. + let msg_height_old = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + pruned_height, + )) + .unwrap(); + + // The proofs in this message are created at a height which the client on destination chain + // does not have. + let msg_proof_height_missing = + MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_consensus_state_height - 1, + host_chain_height.revision_height, + )) + .unwrap(); + + let tests: Vec = vec![ Test { name: "Processing fails because the height is too advanced".to_string(), ctx: context.clone(), @@ -213,28 +230,16 @@ mod tests { msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_proof_height_missing)), want_pass: false, }, - Test { - name: "Good parameters but has previous_connection_id".to_string(), - ctx: context.clone().with_client(&msg_conn_try.client_id, Height::new(0, client_consensus_state_height)), - msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_conn_try.clone())), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context.with_client(&msg_conn_try.client_id, Height::new(0, client_consensus_state_height)), - msg: ConnectionMsg::ConnectionOpenTry(Box::new(msg_conn_try.with_previous_connection_id(None))), - want_pass: true, - }, ] .into_iter() .collect(); - for test in 
tests { - let res = dispatch(&test.ctx, test.msg.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = dispatch(&test.ctx, test.msg.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "conn_open_try: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -242,28 +247,28 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have TryOpen state. - let res: ConnectionResult = proto_output.result; - assert_eq!(res.connection_end.state().clone(), State::TryOpen); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenTryConnection(_))); - assert_eq!(e.height(), test.ctx.host_current_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "conn_open_try: failed for test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have TryOpen state. 
+ let res: ConnectionResult = proto_output.result; + assert_eq!(res.connection_end.state().clone(), State::TryOpen); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenTryConnection(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "conn_open_try: failed for test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics03_connection/handler/verify.rs b/modules/src/core/ics03_connection/handler/verify.rs index 71e6ee65d3..2b96fa5de3 100644 --- a/modules/src/core/ics03_connection/handler/verify.rs +++ b/modules/src/core/ics03_connection/handler/verify.rs @@ -1,99 +1,67 @@ //! ICS3 verification functions, common across all four handlers of ICS3. -use crate::core::ics02_client::client_consensus::ConsensusState; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::ConnectionReader; -use crate::core::ics03_connection::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::proofs::{ConsensusProof, Proofs}; -use crate::Height; - -/// Entry point for verifying all proofs bundled in any ICS3 message. -pub fn verify_proofs( - ctx: &dyn ConnectionReader, - client_state: Option, - height: Height, - connection_end: &ConnectionEnd, - expected_conn: &ConnectionEnd, - proofs: &Proofs, -) -> Result<(), Error> { - verify_connection_proof( - ctx, - height, - connection_end, - expected_conn, - proofs.height(), - proofs.object_proof(), - )?; - - // If the message includes a client state, then verify the proof for that state. 
- if let Some(expected_client_state) = client_state { - verify_client_proof( - ctx, - height, - connection_end, - expected_client_state, - proofs.height(), - proofs - .client_proof() - .as_ref() - .ok_or_else(Error::null_client_proof)?, - )?; - } - - // If a consensus proof is attached to the message, then verify it. - if let Some(proof) = proofs.consensus_proof() { - Ok(verify_consensus_proof(ctx, height, connection_end, &proof)?) - } else { - Ok(()) - } -} +use crate::core::ics02_client::{ + client_consensus::ConsensusState, client_def::ClientDef, client_state::ClientState, +}; + +use crate::{ + core::{ + ics03_connection::{connection::ConnectionEnd, error::Error}, + ics23_commitment::commitment::CommitmentProofBytes, + ics26_routing::context::ReaderContext, + }, + proofs::ConsensusProof, + Height, +}; +use alloc::{format, vec::Vec}; /// Verifies the authenticity and semantic correctness of a commitment `proof`. The commitment /// claims to prove that an object of type connection exists on the source chain (i.e., the chain /// which created this proof). This object must match the state of `expected_conn`. -pub fn verify_connection_proof( - ctx: &dyn ConnectionReader, - height: Height, - connection_end: &ConnectionEnd, - expected_conn: &ConnectionEnd, - proof_height: Height, - proof: &CommitmentProofBytes, +pub fn verify_connection_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + expected_conn: &ConnectionEnd, + proof_height: Height, + proof: &CommitmentProofBytes, ) -> Result<(), Error> { - // Fetch the client state (IBC client on the local/host chain). - let client_state = ctx.client_state(connection_end.client_id())?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - // The client must have the consensus state for the height where this proof was created. 
- let consensus_state = ctx.client_consensus_state(connection_end.client_id(), proof_height)?; - - // A counterparty connection id of None causes `unwrap()` below and indicates an internal - // error as this is the connection id on the counterparty chain that must always be present. - let connection_id = connection_end - .counterparty() - .connection_id() - .ok_or_else(Error::invalid_counterparty)?; - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - // Verify the proof for the connection state against the expected connection end. - client_def - .verify_connection_state( - &client_state, - height, - connection_end.counterparty().prefix(), - proof, - consensus_state.root(), - connection_id, - expected_conn, - ) - .map_err(Error::verify_connection_state) + // Fetch the client state (IBC client on the local/host chain). + let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + // The client must have the consensus state for the height where this proof was created. + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; + + // A counterparty connection id of None causes `unwrap()` below and indicates an internal + // error as this is the connection id on the counterparty chain that must always be present. + let connection_id = connection_end + .counterparty() + .connection_id() + .ok_or_else(Error::invalid_counterparty)?; + + let client_def = client_state.client_def(); + + // Verify the proof for the connection state against the expected connection end. 
+ client_def + .verify_connection_state( + ctx, + connection_end.client_id(), + &client_state, + height, + connection_end.counterparty().prefix(), + proof, + consensus_state.root(), + connection_id, + expected_conn, + ) + .map_err(Error::verify_connection_state) } /// Verifies the client `proof` from a connection handshake message, typically from a @@ -103,96 +71,129 @@ pub fn verify_connection_proof( /// complete verification: that the client state the counterparty stores is valid (i.e., not frozen, /// at the same revision as the current chain, with matching chain identifiers, etc) and that the /// `proof` is correct. -pub fn verify_client_proof( - ctx: &dyn ConnectionReader, - height: Height, - connection_end: &ConnectionEnd, - expected_client_state: AnyClientState, - proof_height: Height, - proof: &CommitmentProofBytes, +pub fn verify_client_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + expected_client_state: Ctx::AnyClientState, + proof_height: Height, + proof: &CommitmentProofBytes, ) -> Result<(), Error> { - // Fetch the local client state (IBC client running on the host chain). - let client_state = ctx.client_state(connection_end.client_id())?; - - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - let consensus_state = ctx.client_consensus_state(connection_end.client_id(), proof_height)?; - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - client_def - .verify_client_full_state( - &client_state, - height, - connection_end.counterparty().prefix(), - proof, - consensus_state.root(), - connection_end.counterparty().client_id(), - &expected_client_state, - ) - .map_err(|e| { - Error::client_state_verification_failure(connection_end.client_id().clone(), e) - }) + // Fetch the local client state (IBC client running on the host chain). 
+ let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let consensus_state = ctx + .consensus_state(connection_end.client_id(), proof_height) + .map_err(|e| Error::consensus_state_verification_failure(proof_height, e))?; + + let client_def = client_state.client_def(); + + client_def + .verify_client_full_state( + ctx, + &client_state, + height, + connection_end.counterparty().prefix(), + proof, + consensus_state.root(), + connection_end.counterparty().client_id(), + &expected_client_state, + ) + .map_err(|e| { + Error::client_state_verification_failure(connection_end.client_id().clone(), e) + }) } -pub fn verify_consensus_proof( - ctx: &dyn ConnectionReader, - height: Height, - connection_end: &ConnectionEnd, - proof: &ConsensusProof, +pub fn verify_consensus_proof( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + proof: &ConsensusProof, ) -> Result<(), Error> { - // Fetch the client state (IBC client on the local chain). - let client_state = ctx.client_state(connection_end.client_id())?; - - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - // Fetch the expected consensus state from the historical (local) header data. - let expected_consensus = ctx.host_consensus_state(proof.height())?; - - let consensus_state = ctx.client_consensus_state(connection_end.client_id(), height)?; - - let client = AnyClient::from_client_type(client_state.client_type()); - - client - .verify_client_consensus_state( - &client_state, - height, - connection_end.counterparty().prefix(), - proof.proof(), - consensus_state.root(), - connection_end.counterparty().client_id(), - proof.height(), - &expected_consensus, - ) - .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e)) + // Fetch the client state (IBC client on the local chain). 
+ let client_state = ctx.client_state(connection_end.client_id()).map_err(Error::ics02_client)?; + + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let consensus_state = ctx + .consensus_state(connection_end.client_id(), height) + .map_err(|e| Error::consensus_state_verification_failure(height, e))?; + + let client = client_state.client_def(); + + // todo: we can remove this hack, once this is merged https://github.com/cosmos/ibc/pull/839 + let (consensus_proof, expected_consensus) = match ctx.host_client_type() { + client_type if client_type.contains("beefy") || client_type.contains("near") => { + #[derive(codec::Decode)] + struct ConsensusProofwithHostConsensusStateProof { + host_consensus_state_proof: Vec, + consensus_proof: Vec, + } + // if the host is beefy or near, we need to decode the proof before passing it on. + let connection_proof: ConsensusProofwithHostConsensusStateProof = + codec::Decode::decode(&mut proof.proof().as_bytes()).map_err(|e| { + Error::implementation_specific(format!("failed to decode: {:?}", e)) + })?; + // Fetch the expected consensus state from the historical (local) header data. 
+ let expected_consensus = ctx + .host_consensus_state( + proof.height(), + Some(connection_proof.host_consensus_state_proof), + ) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + ( + CommitmentProofBytes::try_from(connection_proof.consensus_proof).map_err(|e| { + Error::implementation_specific(format!("empty proof bytes: {:?}", e)) + })?, + expected_consensus, + ) + }, + _ => ( + proof.proof().clone(), + ctx.host_consensus_state(proof.height(), None) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?, + ), + }; + + client + .verify_client_consensus_state( + ctx, + &client_state, + height, + connection_end.counterparty().prefix(), + &consensus_proof, + consensus_state.root(), + connection_end.counterparty().client_id(), + proof.height(), + &expected_consensus, + ) + .map_err(|e| Error::consensus_state_verification_failure(proof.height(), e))?; + + Ok(()) } /// Checks that `claimed_height` is within normal bounds, i.e., fresh enough so that the chain has /// not pruned it yet, but not newer than the current (actual) height of the local chain. -pub fn check_client_consensus_height( - ctx: &dyn ConnectionReader, - claimed_height: Height, +pub fn check_client_consensus_height( + ctx: &Ctx, + claimed_height: Height, ) -> Result<(), Error> { - if claimed_height > ctx.host_current_height() { - // Fail if the consensus height is too advanced. - return Err(Error::invalid_consensus_height( - claimed_height, - ctx.host_current_height(), - )); - } - - if claimed_height < ctx.host_oldest_height() { - // Fail if the consensus height is too old (has been pruned). - return Err(Error::stale_consensus_height( - claimed_height, - ctx.host_oldest_height(), - )); - } - - // Height check is within normal bounds, check passes. - Ok(()) + if claimed_height > ctx.host_height() { + // Fail if the consensus height is too advanced. 
+ return Err(Error::invalid_consensus_height(claimed_height, ctx.host_height())) + } + + if claimed_height < ctx.host_oldest_height() { + // Fail if the consensus height is too old (has been pruned). + return Err(Error::stale_consensus_height(claimed_height, ctx.host_oldest_height())) + } + + // Height check is within normal bounds, check passes. + Ok(()) } diff --git a/modules/src/core/ics03_connection/msgs.rs b/modules/src/core/ics03_connection/msgs.rs index ab2cbec661..94fd350969 100644 --- a/modules/src/core/ics03_connection/msgs.rs +++ b/modules/src/core/ics03_connection/msgs.rs @@ -12,11 +12,13 @@ //! Another difference to ICS3 specs is that each message comprises an additional field called //! `signer` which is specific to Cosmos-SDK. -use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; -use crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; -use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; -use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; +use crate::core::ics02_client::context::ClientTypes; +use crate::core::ics03_connection::msgs::{ + conn_open_ack::MsgConnectionOpenAck, conn_open_confirm::MsgConnectionOpenConfirm, + conn_open_init::MsgConnectionOpenInit, conn_open_try::MsgConnectionOpenTry, +}; use alloc::boxed::Box; +use core::fmt::Debug; pub mod conn_open_ack; pub mod conn_open_confirm; @@ -25,28 +27,32 @@ pub mod conn_open_try; /// Enumeration of all possible messages that the ICS3 protocol processes. 
#[derive(Clone, Debug, PartialEq, Eq)] -pub enum ConnectionMsg { - ConnectionOpenInit(MsgConnectionOpenInit), - ConnectionOpenTry(Box), - ConnectionOpenAck(Box), - ConnectionOpenConfirm(MsgConnectionOpenConfirm), +pub enum ConnectionMsg +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, +{ + ConnectionOpenInit(MsgConnectionOpenInit), + ConnectionOpenTry(Box>), + ConnectionOpenAck(Box>), + ConnectionOpenConfirm(MsgConnectionOpenConfirm), } #[cfg(test)] pub mod test_util { - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::prelude::*; - use ibc_proto::ibc::core::commitment::v1::MerklePrefix; - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; + use crate::{ + core::ics24_host::identifier::{ClientId, ConnectionId}, + prelude::*, + }; + use ibc_proto::ibc::core::{ + commitment::v1::MerklePrefix, connection::v1::Counterparty as RawCounterparty, + }; - pub fn get_dummy_raw_counterparty() -> RawCounterparty { - RawCounterparty { - client_id: ClientId::default().to_string(), - connection_id: ConnectionId::default().to_string(), - prefix: Some(MerklePrefix { - key_prefix: b"ibc".to_vec(), - }), - } - } + pub fn get_dummy_raw_counterparty() -> RawCounterparty { + RawCounterparty { + client_id: ClientId::default().to_string(), + connection_id: ConnectionId::default().to_string(), + prefix: Some(MerklePrefix { key_prefix: b"ibc".to_vec() }), + } + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs index 60a42712ff..a428743863 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_ack.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_ack.rs @@ -1,265 +1,283 @@ use crate::prelude::*; +use core::fmt::Display; -use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; +use crate::core::ics02_client; +use ibc_proto::{ + google::protobuf::Any, + ibc::core::connection::{v1, 
v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck}, +}; use tendermint_proto::Protobuf; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::proofs::{ConsensusProof, Proofs}; -use crate::signer::Signer; -use crate::tx_msg::Msg; -use crate::Height; +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics03_connection::{error::Error, version::Version}, + ics23_commitment::commitment::CommitmentProofBytes, + ics24_host::identifier::ConnectionId, + }, + proofs::{ConsensusProof, Proofs}, + signer::Signer, + tx_msg::Msg, + Height, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenAck"; /// Message definition `MsgConnectionOpenAck` (i.e., `ConnOpenAck` datagram). #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgConnectionOpenAck { - pub connection_id: ConnectionId, - pub counterparty_connection_id: ConnectionId, - pub client_state: Option, - pub proofs: Proofs, - pub version: Version, - pub signer: Signer, +pub struct MsgConnectionOpenAck { + pub connection_id: ConnectionId, + pub counterparty_connection_id: ConnectionId, + pub client_state: Option, + pub proofs: Proofs, + pub version: Version, + pub signer: Signer, } -impl MsgConnectionOpenAck { - /// Getter for accessing the `consensus_height` field from this message. Returns the special - /// value `Height(0)` if this field is not set. - pub fn consensus_height(&self) -> Height { - match self.proofs.consensus_proof() { - None => Height::zero(), - Some(p) => p.height(), - } - } +impl MsgConnectionOpenAck { + /// Getter for accessing the `consensus_height` field from this message. Returns the special + /// value `Height(0)` if this field is not set. 
+ pub fn consensus_height(&self) -> Height { + match self.proofs.consensus_proof() { + None => Height::zero(), + Some(p) => p.height(), + } + } } -impl Msg for MsgConnectionOpenAck { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenAck; +impl Msg for MsgConnectionOpenAck +where + C: ClientTypes + Clone, + Any: From, +{ + type ValidationError = Error; + type Raw = RawMsgConnectionOpenAck; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgConnectionOpenAck {} +impl Protobuf for MsgConnectionOpenAck +where + C: ClientTypes + Clone, + Any: From, + MsgConnectionOpenAck: TryFrom, + as TryFrom>::Error: Display, +{ +} -impl TryFrom for MsgConnectionOpenAck { - type Error = Error; +impl TryFrom for MsgConnectionOpenAck +where + C: ClientTypes, + C::AnyClientState: TryFrom, +{ + type Error = Error; - fn try_from(msg: RawMsgConnectionOpenAck) -> Result { - let consensus_height = msg - .consensus_height - .ok_or_else(Error::missing_consensus_height)? - .into(); - let consensus_proof_obj = ConsensusProof::new( - msg.proof_consensus - .try_into() - .map_err(Error::invalid_proof)?, - consensus_height, - ) - .map_err(Error::invalid_proof)?; + fn try_from(msg: RawMsgConnectionOpenAck) -> Result { + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? 
- .into(); + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); - let client_proof = - CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; + let client_proof = + CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; - Ok(Self { - connection_id: msg - .connection_id - .parse() - .map_err(Error::invalid_identifier)?, - counterparty_connection_id: msg - .counterparty_connection_id - .parse() - .map_err(Error::invalid_identifier)?, - client_state: msg - .client_state - .map(AnyClientState::try_from) - .transpose() - .map_err(Error::ics02_client)?, - version: msg.version.ok_or_else(Error::empty_versions)?.try_into()?, - proofs: Proofs::new( - msg.proof_try.try_into().map_err(Error::invalid_proof)?, - Some(client_proof), - Option::from(consensus_proof_obj), - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + Ok(Self { + connection_id: msg.connection_id.parse().map_err(Error::invalid_identifier)?, + counterparty_connection_id: msg + .counterparty_connection_id + .parse() + .map_err(Error::invalid_identifier)?, + client_state: msg + .client_state + .map(C::AnyClientState::try_from) + .transpose() + .map_err(Error::ics02_client)?, + version: msg.version.ok_or_else(Error::empty_versions)?.try_into()?, + proofs: Proofs::new( + msg.proof_try.try_into().map_err(Error::invalid_proof)?, + Some(client_proof), + consensus_proof_obj, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgConnectionOpenAck { - fn from(ics_msg: MsgConnectionOpenAck) -> Self { - RawMsgConnectionOpenAck { - connection_id: ics_msg.connection_id.as_str().to_string(), - counterparty_connection_id: ics_msg.counterparty_connection_id.as_str().to_string(), - client_state: ics_msg - .client_state - .map_or_else(|| None, |v| Some(v.into())), - 
proof_height: Some(ics_msg.proofs.height().into()), - proof_try: ics_msg.proofs.object_proof().clone().into(), - proof_client: ics_msg - .proofs - .client_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_consensus: ics_msg - .proofs - .consensus_proof() - .map_or_else(Vec::new, |v| v.proof().clone().into()), - consensus_height: ics_msg - .proofs - .consensus_proof() - .map_or_else(|| None, |h| Some(h.height().into())), - version: Some(ics_msg.version.into()), - signer: ics_msg.signer.to_string(), - } - } +impl From> for RawMsgConnectionOpenAck +where + C: ClientTypes, + Any: From, +{ + fn from(ics_msg: MsgConnectionOpenAck) -> Self { + RawMsgConnectionOpenAck { + connection_id: ics_msg.connection_id.as_str().to_string(), + counterparty_connection_id: ics_msg.counterparty_connection_id.as_str().to_string(), + client_state: ics_msg.client_state.map_or_else(|| None, |v| Some(v.into())), + proof_height: Some(ics_msg.proofs.height().into()), + proof_try: ics_msg.proofs.object_proof().clone().into(), + proof_client: ics_msg.proofs.client_proof().clone().map_or_else(Vec::new, |v| v.into()), + proof_consensus: ics_msg + .proofs + .consensus_proof() + .map_or_else(Vec::new, |v| v.proof().clone().into()), + consensus_height: ics_msg + .proofs + .consensus_proof() + .map_or_else(|| None, |h| Some(h.height().into())), + version: Some(ics_msg.version.into()), + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; + use crate::{mock::client_state::AnyClientState, prelude::*}; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck, + }; - use crate::core::ics03_connection::version::Version; - use crate::core::ics24_host::identifier::ConnectionId; - use crate::test_utils::{get_dummy_bech32_account, 
get_dummy_proof}; + use crate::{ + core::{ics03_connection::version::Version, ics24_host::identifier::ConnectionId}, + mock::{client_state::MockClientState, header::MockHeader}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; - pub fn get_dummy_raw_msg_conn_open_ack( - proof_height: u64, - consensus_height: u64, - ) -> RawMsgConnectionOpenAck { - RawMsgConnectionOpenAck { - connection_id: ConnectionId::new(0).to_string(), - counterparty_connection_id: ConnectionId::new(1).to_string(), - proof_try: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - proof_consensus: get_dummy_proof(), - consensus_height: Some(Height { - revision_number: 0, - revision_height: consensus_height, - }), - client_state: None, - proof_client: get_dummy_proof(), - version: Some(Version::default().into()), - signer: get_dummy_bech32_account(), - } - } + pub fn get_dummy_raw_msg_conn_open_ack( + proof_height: u64, + consensus_height: u64, + ) -> RawMsgConnectionOpenAck { + RawMsgConnectionOpenAck { + connection_id: ConnectionId::new(0).to_string(), + counterparty_connection_id: ConnectionId::new(1).to_string(), + proof_try: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + proof_consensus: get_dummy_proof(), + consensus_height: Some(Height { + revision_number: 0, + revision_height: consensus_height, + }), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::default().into())).into(), + ), + proof_client: get_dummy_proof(), + version: Some(Version::default().into()), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck; + use ibc_proto::ibc::core::{ + client::v1::Height, 
connection::v1::MsgConnectionOpenAck as RawMsgConnectionOpenAck, + }; - use crate::core::ics03_connection::msgs::conn_open_ack::test_util::get_dummy_raw_msg_conn_open_ack; - use crate::core::ics03_connection::msgs::conn_open_ack::MsgConnectionOpenAck; + use crate::{ + core::ics03_connection::msgs::conn_open_ack::{ + test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck, + }, + mock::context::{MockClientTypes, MockContext}, + }; - #[test] - fn parse_connection_open_ack_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenAck, - want_pass: bool, - } + #[test] + fn parse_connection_open_ack_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenAck, + want_pass: bool, + } - let default_ack_msg = get_dummy_raw_msg_conn_open_ack(5, 5); + let default_ack_msg = get_dummy_raw_msg_conn_open_ack(5, 5); - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_ack_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad connection id, non-alpha".to_string(), - raw: RawMsgConnectionOpenAck { - connection_id: "con007".to_string(), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad version, missing version".to_string(), - raw: RawMsgConnectionOpenAck { - version: None, - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenAck { - proof_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad consensus height, height is 0".to_string(), - raw: RawMsgConnectionOpenAck { - consensus_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: 
default_ack_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad connection id, non-alpha".to_string(), + raw: RawMsgConnectionOpenAck { + connection_id: "con007".to_string(), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad version, missing version".to_string(), + raw: RawMsgConnectionOpenAck { version: None, ..default_ack_msg.clone() }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenAck { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad consensus height, height is 0".to_string(), + raw: RawMsgConnectionOpenAck { + consensus_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_ack_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); - for test in tests { - let msg = MsgConnectionOpenAck::try_from(test.raw.clone()); + for test in tests { + let msg = + MsgConnectionOpenAck::>::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenAck::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenAck::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_ack(5, 6); - let msg = MsgConnectionOpenAck::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenAck::from(msg.clone()); - let msg_back = MsgConnectionOpenAck::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_ack(5, 6); + let msg = + MsgConnectionOpenAck::>::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenAck::from(msg.clone()); + let msg_back = 
MsgConnectionOpenAck::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs index 8a15be9860..f5c31396f1 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_confirm.rs @@ -4,170 +4,161 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; -use crate::core::ics03_connection::error::Error; -use crate::core::ics24_host::identifier::ConnectionId; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ics03_connection::error::Error, ics24_host::identifier::ConnectionId}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenConfirm"; /// /// Message definition for `MsgConnectionOpenConfirm` (i.e., `ConnOpenConfirm` datagram). 
-/// #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgConnectionOpenConfirm { - pub connection_id: ConnectionId, - pub proofs: Proofs, - pub signer: Signer, + pub connection_id: ConnectionId, + pub proofs: Proofs, + pub signer: Signer, } impl Msg for MsgConnectionOpenConfirm { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenConfirm; + type ValidationError = Error; + type Raw = RawMsgConnectionOpenConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgConnectionOpenConfirm {} impl TryFrom for MsgConnectionOpenConfirm { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenConfirm) -> Result { - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? - .into(); - - Ok(Self { - connection_id: msg - .connection_id - .parse() - .map_err(Error::invalid_identifier)?, - proofs: Proofs::new( - msg.proof_ack.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenConfirm) -> Result { + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); + + Ok(Self { + connection_id: msg.connection_id.parse().map_err(Error::invalid_identifier)?, + proofs: Proofs::new( + msg.proof_ack.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgConnectionOpenConfirm { - fn from(ics_msg: MsgConnectionOpenConfirm) -> Self { - RawMsgConnectionOpenConfirm { - connection_id: ics_msg.connection_id.as_str().to_string(), - proof_ack: 
ics_msg.proofs.object_proof().clone().into(), - proof_height: Some(ics_msg.proofs.height().into()), - signer: ics_msg.signer.to_string(), - } - } + fn from(ics_msg: MsgConnectionOpenConfirm) -> Self { + RawMsgConnectionOpenConfirm { + connection_id: ics_msg.connection_id.as_str().to_string(), + proof_ack: ics_msg.proofs.object_proof().clone().into(), + proof_height: Some(ics_msg.proofs.height().into()), + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; - - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - pub fn get_dummy_raw_msg_conn_open_confirm() -> RawMsgConnectionOpenConfirm { - RawMsgConnectionOpenConfirm { - connection_id: "srcconnection".to_string(), - proof_ack: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: 10, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm, + }; + + use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; + + pub fn get_dummy_raw_msg_conn_open_confirm() -> RawMsgConnectionOpenConfirm { + RawMsgConnectionOpenConfirm { + connection_id: "srcconnection".to_string(), + proof_ack: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: 10 }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm; - - use crate::core::ics03_connection::msgs::conn_open_confirm::test_util::get_dummy_raw_msg_conn_open_confirm; - use 
crate::core::ics03_connection::msgs::conn_open_confirm::MsgConnectionOpenConfirm; - - #[test] - fn parse_connection_open_confirm_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenConfirm, - want_pass: bool, - } - - let default_ack_msg = get_dummy_raw_msg_conn_open_confirm(); - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_ack_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad connection id, non-alpha".to_string(), - raw: RawMsgConnectionOpenConfirm { - connection_id: "con007".to_string(), - ..default_ack_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenConfirm { - proof_height: Some(Height { - revision_number: 1, - revision_height: 0, - }), - ..default_ack_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenConfirm::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_confirm(); - let msg = MsgConnectionOpenConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenConfirm::from(msg.clone()); - let msg_back = MsgConnectionOpenConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::{ + client::v1::Height, connection::v1::MsgConnectionOpenConfirm as RawMsgConnectionOpenConfirm, + }; + + use crate::core::ics03_connection::msgs::conn_open_confirm::{ + test_util::get_dummy_raw_msg_conn_open_confirm, MsgConnectionOpenConfirm, + }; + + #[test] + fn parse_connection_open_confirm_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: 
RawMsgConnectionOpenConfirm, + want_pass: bool, + } + + let default_ack_msg = get_dummy_raw_msg_conn_open_confirm(); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_ack_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad connection id, non-alpha".to_string(), + raw: RawMsgConnectionOpenConfirm { + connection_id: "con007".to_string(), + ..default_ack_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenConfirm { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_ack_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = MsgConnectionOpenConfirm::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_confirm(); + let msg = MsgConnectionOpenConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenConfirm::from(msg.clone()); + let msg_back = MsgConnectionOpenConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_init.rs b/modules/src/core/ics03_connection/msgs/conn_open_init.rs index 196fbcf119..ba8f588b49 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_init.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_init.rs @@ -5,179 +5,187 @@ use core::time::Duration; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; use tendermint_proto::Protobuf; -use crate::core::ics03_connection::connection::Counterparty; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use 
crate::core::ics24_host::identifier::ClientId; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics03_connection::{connection::Counterparty, error::Error, version::Version}, + ics24_host::identifier::ClientId, + }, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenInit"; /// /// Message definition `MsgConnectionOpenInit` (i.e., the `ConnOpenInit` datagram). -/// #[derive(Clone, Debug, PartialEq, Eq)] pub struct MsgConnectionOpenInit { - pub client_id: ClientId, - pub counterparty: Counterparty, - pub version: Option, - pub delay_period: Duration, - pub signer: Signer, + pub client_id: ClientId, + pub counterparty: Counterparty, + pub version: Option, + pub delay_period: Duration, + pub signer: Signer, } impl Msg for MsgConnectionOpenInit { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenInit; + type ValidationError = Error; + type Raw = RawMsgConnectionOpenInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgConnectionOpenInit {} impl TryFrom for MsgConnectionOpenInit { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenInit) -> Result { - Ok(Self { - client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, - counterparty: msg - .counterparty - .ok_or_else(Error::missing_counterparty)? 
- .try_into()?, - version: msg.version.map(|version| version.try_into()).transpose()?, - delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenInit) -> Result { + Ok(Self { + client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, + counterparty: msg.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + version: msg.version.map(|version| version.try_into()).transpose()?, + delay_period: Duration::from_nanos(msg.delay_period), + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgConnectionOpenInit { - fn from(ics_msg: MsgConnectionOpenInit) -> Self { - RawMsgConnectionOpenInit { - client_id: ics_msg.client_id.as_str().to_string(), - counterparty: Some(ics_msg.counterparty.into()), - version: ics_msg.version.map(|version| version.into()), - delay_period: ics_msg.delay_period.as_nanos() as u64, - signer: ics_msg.signer.to_string(), - } - } + fn from(ics_msg: MsgConnectionOpenInit) -> Self { + RawMsgConnectionOpenInit { + client_id: ics_msg.client_id.as_str().to_string(), + counterparty: Some(ics_msg.counterparty.into()), + version: ics_msg.version.map(|version| version.into()), + delay_period: ics_msg.delay_period.as_nanos() as u64, + signer: ics_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; - - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::Version; - use crate::core::ics24_host::identifier::ClientId; - use crate::test_utils::get_dummy_bech32_account; - - /// Extends the implementation with additional helper methods. - impl MsgConnectionOpenInit { - /// Setter for `client_id`. 
Amenable to chaining, since it consumes the input message. - pub fn with_client_id(self, client_id: ClientId) -> Self { - MsgConnectionOpenInit { client_id, ..self } - } - } - - /// Returns a dummy message, for testing only. - /// Other unit tests may import this if they depend on a MsgConnectionOpenInit. - pub fn get_dummy_raw_msg_conn_open_init() -> RawMsgConnectionOpenInit { - RawMsgConnectionOpenInit { - client_id: ClientId::default().to_string(), - counterparty: Some(get_dummy_raw_counterparty()), - version: Some(Version::default().into()), - delay_period: 0, - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; + + use crate::{ + core::{ + ics03_connection::{ + msgs::{ + conn_open_init::MsgConnectionOpenInit, test_util::get_dummy_raw_counterparty, + }, + version::Version, + }, + ics24_host::identifier::ClientId, + }, + test_utils::get_dummy_bech32_account, + }; + + /// Extends the implementation with additional helper methods. + impl MsgConnectionOpenInit { + /// Setter for `client_id`. Amenable to chaining, since it consumes the input message. + pub fn with_client_id(self, client_id: ClientId) -> Self { + MsgConnectionOpenInit { client_id, ..self } + } + } + + /// Returns a dummy message, for testing only. + /// Other unit tests may import this if they depend on a MsgConnectionOpenInit. 
+ pub fn get_dummy_raw_msg_conn_open_init() -> RawMsgConnectionOpenInit { + RawMsgConnectionOpenInit { + client_id: ClientId::default().to_string(), + counterparty: Some(get_dummy_raw_counterparty()), + version: Some(Version::default().into()), + delay_period: 0, + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenInit as RawMsgConnectionOpenInit; - - use super::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - - #[test] - fn parse_connection_open_init_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenInit, - want_pass: bool, - } - - let default_init_msg = get_dummy_raw_msg_conn_open_init(); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_init_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad client id, name too short".to_string(), - raw: RawMsgConnectionOpenInit { - client_id: "client".to_string(), - ..default_init_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad destination connection id, name too long".to_string(), - raw: RawMsgConnectionOpenInit { - counterparty: Some(RawCounterparty { - connection_id: - "abcdefghijksdffjssdkflweldflsfladfsfwjkrekcmmsdfsdfjflddmnopqrstu" - .to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_init_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenInit::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn 
to_and_from() { - let raw = get_dummy_raw_msg_conn_open_init(); - let msg = MsgConnectionOpenInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenInit::from(msg.clone()); - let msg_back = MsgConnectionOpenInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::connection::v1::{ + Counterparty as RawCounterparty, MsgConnectionOpenInit as RawMsgConnectionOpenInit, + }; + + use super::MsgConnectionOpenInit; + use crate::core::ics03_connection::msgs::{ + conn_open_init::test_util::get_dummy_raw_msg_conn_open_init, + test_util::get_dummy_raw_counterparty, + }; + + #[test] + fn parse_connection_open_init_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenInit, + want_pass: bool, + } + + let default_init_msg = get_dummy_raw_msg_conn_open_init(); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_init_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad client id, name too short".to_string(), + raw: RawMsgConnectionOpenInit { + client_id: "client".to_string(), + ..default_init_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad destination connection id, name too long".to_string(), + raw: RawMsgConnectionOpenInit { + counterparty: Some(RawCounterparty { + connection_id: + "abcdefghijksdffjssdkflweldflsfladfsfwjkrekcmmsdfsdfjflddmnopqrstu" + .to_string(), + ..get_dummy_raw_counterparty() + }), + ..default_init_msg + }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = MsgConnectionOpenInit::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenInit::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_conn_open_init(); + let 
msg = MsgConnectionOpenInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenInit::from(msg.clone()); + let msg_back = MsgConnectionOpenInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/msgs/conn_open_try.rs b/modules/src/core/ics03_connection/msgs/conn_open_try.rs index 4709733489..66f82fa0dc 100644 --- a/modules/src/core/ics03_connection/msgs/conn_open_try.rs +++ b/modules/src/core/ics03_connection/msgs/conn_open_try.rs @@ -1,368 +1,382 @@ use crate::prelude::*; use core::{ - convert::{TryFrom, TryInto}, - str::FromStr, - time::Duration, + convert::{TryFrom, TryInto}, + fmt::{Debug, Display}, + time::Duration, }; +use ibc_proto::{google::protobuf::Any, ibc::core::connection::v1}; use tendermint_proto::Protobuf; +use crate::core::ics02_client; use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::Counterparty; -use crate::core::ics03_connection::error::Error; -use crate::core::ics03_connection::version::Version; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; -use crate::proofs::{ConsensusProof, Proofs}; -use crate::signer::Signer; -use crate::tx_msg::Msg; -use crate::Height; +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + ics03_connection::{connection::Counterparty, error::Error, version::Version}, + ics23_commitment::commitment::CommitmentProofBytes, + ics24_host::identifier::ClientId, + }, + proofs::{ConsensusProof, Proofs}, + signer::Signer, + tx_msg::Msg, + Height, +}; pub const TYPE_URL: &str = "/ibc.core.connection.v1.MsgConnectionOpenTry"; /// /// Message definition `MsgConnectionOpenTry` (i.e., `ConnOpenTry` datagram). 
-/// #[derive(Clone, Debug, PartialEq, Eq)] -pub struct MsgConnectionOpenTry { - pub previous_connection_id: Option, - pub client_id: ClientId, - pub client_state: Option, - pub counterparty: Counterparty, - pub counterparty_versions: Vec, - pub proofs: Proofs, - pub delay_period: Duration, - pub signer: Signer, +pub struct MsgConnectionOpenTry { + pub client_id: ClientId, + pub client_state: Option, + pub counterparty: Counterparty, + pub counterparty_versions: Vec, + pub proofs: Proofs, + pub delay_period: Duration, + pub signer: Signer, } -impl MsgConnectionOpenTry { - /// Getter for accessing the `consensus_height` field from this message. Returns the special - /// value `0` if this field is not set. - pub fn consensus_height(&self) -> Height { - match self.proofs.consensus_proof() { - None => Height::zero(), - Some(p) => p.height(), - } - } +impl MsgConnectionOpenTry +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, +{ + /// Getter for accessing the `consensus_height` field from this message. Returns the special + /// value `0` if this field is not set. 
+ pub fn consensus_height(&self) -> Height { + match self.proofs.consensus_proof() { + None => Height::zero(), + Some(p) => p.height(), + } + } } -impl Msg for MsgConnectionOpenTry { - type ValidationError = Error; - type Raw = RawMsgConnectionOpenTry; - - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } - - fn type_url(&self) -> String { - TYPE_URL.to_string() - } +impl Msg for MsgConnectionOpenTry +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + Any: From, +{ + type ValidationError = Error; + type Raw = RawMsgConnectionOpenTry; + + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } + + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } -impl Protobuf for MsgConnectionOpenTry {} - -impl TryFrom for MsgConnectionOpenTry { - type Error = Error; - - fn try_from(msg: RawMsgConnectionOpenTry) -> Result { - let previous_connection_id = Some(msg.previous_connection_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::invalid_identifier)?; - - let consensus_height = msg - .consensus_height - .ok_or_else(Error::missing_consensus_height)? - .into(); - - let consensus_proof_obj = ConsensusProof::new( - msg.proof_consensus - .try_into() - .map_err(Error::invalid_proof)?, - consensus_height, - ) - .map_err(Error::invalid_proof)?; - - let proof_height = msg - .proof_height - .ok_or_else(Error::missing_proof_height)? 
- .into(); - - let client_proof = - CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; - - let counterparty_versions = msg - .counterparty_versions - .into_iter() - .map(Version::try_from) - .collect::, _>>()?; - - if counterparty_versions.is_empty() { - return Err(Error::empty_versions()); - } +impl Protobuf for MsgConnectionOpenTry +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + Any: From, + MsgConnectionOpenTry: TryFrom, + as TryFrom>::Error: Display, +{ +} - Ok(Self { - previous_connection_id, - client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, - client_state: msg - .client_state - .map(AnyClientState::try_from) - .transpose() - .map_err(Error::ics02_client)?, - counterparty: msg - .counterparty - .ok_or_else(Error::missing_counterparty)? - .try_into()?, - counterparty_versions, - proofs: Proofs::new( - msg.proof_init.try_into().map_err(Error::invalid_proof)?, - Some(client_proof), - Some(consensus_proof_obj), - None, - proof_height, - ) - .map_err(Error::invalid_proof)?, - delay_period: Duration::from_nanos(msg.delay_period), - signer: msg.signer.parse().map_err(Error::signer)?, - }) - } +impl TryFrom for MsgConnectionOpenTry +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + C::AnyClientState: TryFrom, +{ + type Error = Error; + + fn try_from(msg: RawMsgConnectionOpenTry) -> Result { + let consensus_proof_obj = { + let proof_bytes: Option = msg.proof_consensus.try_into().ok(); + let consensus_height = msg + .consensus_height + .map(|height| Height::new(height.revision_number, height.revision_height)); + if proof_bytes.is_some() && consensus_height.is_some() { + Some( + ConsensusProof::new(proof_bytes.unwrap(), consensus_height.unwrap()) + .map_err(Error::invalid_proof)?, + ) + } else { + None + } + }; + + let proof_height = msg.proof_height.ok_or_else(Error::missing_proof_height)?.into(); + + let client_proof = + 
CommitmentProofBytes::try_from(msg.proof_client).map_err(Error::invalid_proof)?; + + let counterparty_versions = msg + .counterparty_versions + .into_iter() + .map(Version::try_from) + .collect::, _>>()?; + + if counterparty_versions.is_empty() { + return Err(Error::empty_versions()); + } + + Ok(Self { + client_id: msg.client_id.parse().map_err(Error::invalid_identifier)?, + client_state: msg + .client_state + .map(C::AnyClientState::try_from) + .transpose() + .map_err(Error::ics02_client)?, + counterparty: msg.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?, + counterparty_versions, + proofs: Proofs::new( + msg.proof_init.try_into().map_err(Error::invalid_proof)?, + Some(client_proof), + consensus_proof_obj, + None, + proof_height, + ) + .map_err(Error::invalid_proof)?, + delay_period: Duration::from_nanos(msg.delay_period), + signer: msg.signer.parse().map_err(Error::signer)?, + }) + } } -impl From for RawMsgConnectionOpenTry { - fn from(ics_msg: MsgConnectionOpenTry) -> Self { - RawMsgConnectionOpenTry { - client_id: ics_msg.client_id.as_str().to_string(), - previous_connection_id: ics_msg - .previous_connection_id - .map_or_else(|| "".to_string(), |v| v.as_str().to_string()), - client_state: ics_msg - .client_state - .map_or_else(|| None, |v| Some(v.into())), - counterparty: Some(ics_msg.counterparty.into()), - delay_period: ics_msg.delay_period.as_nanos() as u64, - counterparty_versions: ics_msg - .counterparty_versions - .iter() - .map(|v| v.clone().into()) - .collect(), - proof_height: Some(ics_msg.proofs.height().into()), - proof_init: ics_msg.proofs.object_proof().clone().into(), - proof_client: ics_msg - .proofs - .client_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_consensus: ics_msg - .proofs - .consensus_proof() - .map_or_else(Vec::new, |v| v.proof().clone().into()), - consensus_height: ics_msg - .proofs - .consensus_proof() - .map_or_else(|| None, |h| Some(h.height().into())), - signer: 
ics_msg.signer.to_string(), - } - } +impl From> for RawMsgConnectionOpenTry +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + Any: From, +{ + fn from(ics_msg: MsgConnectionOpenTry) -> Self { + RawMsgConnectionOpenTry { + client_id: ics_msg.client_id.as_str().to_string(), + client_state: ics_msg.client_state.map_or_else(|| None, |v| Some(v.into())), + counterparty: Some(ics_msg.counterparty.into()), + delay_period: ics_msg.delay_period.as_nanos() as u64, + counterparty_versions: ics_msg + .counterparty_versions + .iter() + .map(|v| v.clone().into()) + .collect(), + proof_height: Some(ics_msg.proofs.height().into()), + proof_init: ics_msg.proofs.object_proof().clone().into(), + proof_client: ics_msg.proofs.client_proof().clone().map_or_else(Vec::new, |v| v.into()), + proof_consensus: ics_msg + .proofs + .consensus_proof() + .map_or_else(Vec::new, |v| v.proof().clone().into()), + consensus_height: ics_msg + .proofs + .consensus_proof() + .map_or_else(|| None, |h| Some(h.height().into())), + signer: ics_msg.signer.to_string(), + ..Default::default() + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; - - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Testing-specific helper methods. - impl MsgConnectionOpenTry { - /// Moves the given message into another one, and updates the `previous_connection_id` field. 
- pub fn with_previous_connection_id( - self, - previous_connection_id: Option, - ) -> MsgConnectionOpenTry { - MsgConnectionOpenTry { - previous_connection_id, - ..self - } - } - - /// Setter for `client_id`. - pub fn with_client_id(self, client_id: ClientId) -> MsgConnectionOpenTry { - MsgConnectionOpenTry { client_id, ..self } - } - } - - /// Returns a dummy `RawMsgConnectionOpenTry` with parametrized heights. The parameter - /// `proof_height` represents the height, on the source chain, at which this chain produced the - /// proof. Parameter `consensus_height` represents the height of destination chain which a - /// client on the source chain stores. - pub fn get_dummy_raw_msg_conn_open_try( - proof_height: u64, - consensus_height: u64, - ) -> RawMsgConnectionOpenTry { - RawMsgConnectionOpenTry { - client_id: ClientId::default().to_string(), - previous_connection_id: ConnectionId::default().to_string(), - client_state: None, - counterparty: Some(get_dummy_raw_counterparty()), - delay_period: 0, - counterparty_versions: get_compatible_versions() - .iter() - .map(|v| v.clone().into()) - .collect(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - proof_consensus: get_dummy_proof(), - consensus_height: Some(Height { - revision_number: 0, - revision_height: consensus_height, - }), - proof_client: get_dummy_proof(), - signer: get_dummy_bech32_account(), - } - } + use crate::core::ics02_client::context::ClientTypes; + use crate::{ + core::{ + ics03_connection::{ + msgs::{ + conn_open_try::MsgConnectionOpenTry, test_util::get_dummy_raw_counterparty, + }, + version::get_compatible_versions, + }, + ics24_host::identifier::ClientId, + }, + mock::{ + client_state::{AnyClientState, MockClientState}, + header::MockHeader, + }, + prelude::*, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use core::fmt::Debug; + use ibc_proto::ibc::core::{ + client::v1::Height, 
connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry, + }; + + /// Testing-specific helper methods. + impl MsgConnectionOpenTry + where + C: ClientTypes + Clone + Debug + Eq, + { + /// Setter for `client_id`. + pub fn with_client_id(self, client_id: ClientId) -> MsgConnectionOpenTry { + MsgConnectionOpenTry { client_id, ..self } + } + } + + /// Returns a dummy `RawMsgConnectionOpenTry` with parametrized heights. The parameter + /// `proof_height` represents the height, on the source chain, at which this chain produced the + /// proof. Parameter `consensus_height` represents the height of destination chain which a + /// client on the source chain stores. + pub fn get_dummy_raw_msg_conn_open_try( + proof_height: u64, + consensus_height: u64, + ) -> RawMsgConnectionOpenTry { + RawMsgConnectionOpenTry { + client_id: ClientId::default().to_string(), + client_state: Some( + AnyClientState::Mock(MockClientState::new(MockHeader::default().into())).into(), + ), + counterparty: Some(get_dummy_raw_counterparty()), + delay_period: 0, + counterparty_versions: get_compatible_versions() + .iter() + .map(|v| v.clone().into()) + .collect(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + proof_consensus: get_dummy_proof(), + consensus_height: Some(Height { + revision_number: 0, + revision_height: consensus_height, + }), + proof_client: get_dummy_proof(), + signer: get_dummy_bech32_account(), + ..Default::default() + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::client::v1::Height; - use ibc_proto::ibc::core::connection::v1::Counterparty as RawCounterparty; - use ibc_proto::ibc::core::connection::v1::MsgConnectionOpenTry as RawMsgConnectionOpenTry; - - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use 
crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - - #[test] - fn parse_connection_open_try_msg() { - #[derive(Clone, Debug, PartialEq)] - struct Test { - name: String, - raw: RawMsgConnectionOpenTry, - want_pass: bool, - } - - let default_try_msg = get_dummy_raw_msg_conn_open_try(10, 34); - - let tests: Vec = - vec![ - Test { - name: "Good parameters".to_string(), - raw: default_try_msg.clone(), - want_pass: true, - }, - Test { - name: "Bad client id, name too short".to_string(), - raw: RawMsgConnectionOpenTry { - client_id: "client".to_string(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad destination connection id, name too long".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty: Some(RawCounterparty { - connection_id: - "abcdasdfasdfsdfasfdwefwfsdfsfsfasfwewvxcvdvwgadvaadsefghijklmnopqrstu" - .to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Correct destination client id with lower/upper case and special chars" - .to_string(), - raw: RawMsgConnectionOpenTry { - counterparty: Some(RawCounterparty { - client_id: "ClientId_".to_string(), - ..get_dummy_raw_counterparty() - }), - ..default_try_msg.clone() - }, - want_pass: true, - }, - Test { - name: "Bad counterparty versions, empty versions vec".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty_versions: Vec::new(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad counterparty versions, empty version string".to_string(), - raw: RawMsgConnectionOpenTry { - counterparty_versions: Vec::new(), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad proof height, height is 0".to_string(), - raw: RawMsgConnectionOpenTry { - proof_height: Some(Height { revision_number: 1, revision_height: 0 }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad consensus height, height is 
0".to_string(), - raw: RawMsgConnectionOpenTry { - proof_height: Some(Height { revision_number: 1, revision_height: 0 }), - ..default_try_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty proof".to_string(), - raw: RawMsgConnectionOpenTry { - proof_init: b"".to_vec(), - ..default_try_msg - }, - want_pass: false, - } - ] - .into_iter() - .collect(); - - for test in tests { - let msg = MsgConnectionOpenTry::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_conn_open_try(10, 34); - let msg = MsgConnectionOpenTry::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgConnectionOpenTry::from(msg.clone()); - let msg_back = MsgConnectionOpenTry::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::{ + client::v1::Height, + connection::v1::{ + Counterparty as RawCounterparty, MsgConnectionOpenTry as RawMsgConnectionOpenTry, + }, + }; + + use crate::{ + core::ics03_connection::msgs::{ + conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, + test_util::get_dummy_raw_counterparty, + }, + mock::context::{MockClientTypes, MockContext}, + }; + + #[test] + fn parse_connection_open_try_msg() { + #[derive(Clone, Debug, PartialEq)] + struct Test { + name: String, + raw: RawMsgConnectionOpenTry, + want_pass: bool, + } + + let default_try_msg = get_dummy_raw_msg_conn_open_try(10, 34); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_try_msg.clone(), + want_pass: true, + }, + Test { + name: "Bad client id, name too short".to_string(), + raw: RawMsgConnectionOpenTry { + client_id: "client".to_string(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { 
+ name: "Bad destination connection id, name too long".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty: Some(RawCounterparty { + connection_id: + "abcdasdfasdfsdfasfdwefwfsdfsfsfasfwewvxcvdvwgadvaadsefghijklmnopqrstu" + .to_string(), + ..get_dummy_raw_counterparty() + }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Correct destination client id with lower/upper case and special chars" + .to_string(), + raw: RawMsgConnectionOpenTry { + counterparty: Some(RawCounterparty { + client_id: "ClientId_".to_string(), + ..get_dummy_raw_counterparty() + }), + ..default_try_msg.clone() + }, + want_pass: true, + }, + Test { + name: "Bad counterparty versions, empty versions vec".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty_versions: Vec::new(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad counterparty versions, empty version string".to_string(), + raw: RawMsgConnectionOpenTry { + counterparty_versions: Vec::new(), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad proof height, height is 0".to_string(), + raw: RawMsgConnectionOpenTry { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Bad consensus height, height is 0".to_string(), + raw: RawMsgConnectionOpenTry { + proof_height: Some(Height { revision_number: 1, revision_height: 0 }), + ..default_try_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Empty proof".to_string(), + raw: RawMsgConnectionOpenTry { proof_init: b"".to_vec(), ..default_try_msg }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let msg = + MsgConnectionOpenTry::>::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgConnOpenTry::new failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn 
to_and_from() { + let raw = get_dummy_raw_msg_conn_open_try(10, 34); + let msg = + MsgConnectionOpenTry::>::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgConnectionOpenTry::from(msg.clone()); + let msg_back = MsgConnectionOpenTry::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics03_connection/version.rs b/modules/src/core/ics03_connection/version.rs index 652262d6ec..2ef8ae5090 100644 --- a/modules/src/core/ics03_connection/version.rs +++ b/modules/src/core/ics03_connection/version.rs @@ -6,304 +6,265 @@ use ibc_proto::ibc::core::connection::v1::Version as RawVersion; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use crate::core::ics03_connection::error::Error; -use crate::core::ics04_channel::channel::Order; +use crate::core::{ics03_connection::error::Error, ics04_channel::channel::Order}; /// Stores the identifier and the features supported by a version #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Version { - /// unique version identifier - identifier: String, - /// list of features compatible with the specified identifier - features: Vec, + /// unique version identifier + identifier: String, + /// list of features compatible with the specified identifier + features: Vec, } impl Version { - /// Checks whether or not the given feature is supported in this versin - pub fn is_supported_feature(&self, feature: String) -> bool { - self.features.contains(&feature) - } + /// Checks whether or not the given feature is supported in this versin + pub fn is_supported_feature(&self, feature: String) -> bool { + self.features.contains(&feature) + } } impl Protobuf for Version {} impl TryFrom for Version { - type Error = Error; - fn try_from(value: RawVersion) -> Result { - if value.identifier.trim().is_empty() { - return Err(Error::empty_versions()); - } - for feature in value.features.iter() { - if feature.trim().is_empty() 
{ - return Err(Error::empty_features()); - } - } - Ok(Version { - identifier: value.identifier, - features: value.features, - }) - } + type Error = Error; + fn try_from(value: RawVersion) -> Result { + if value.identifier.trim().is_empty() { + return Err(Error::empty_versions()) + } + for feature in value.features.iter() { + if feature.trim().is_empty() { + return Err(Error::empty_features()) + } + } + Ok(Version { identifier: value.identifier, features: value.features }) + } } impl From for RawVersion { - fn from(value: Version) -> Self { - Self { - identifier: value.identifier, - features: value.features, - } - } + fn from(value: Version) -> Self { + Self { identifier: value.identifier, features: value.features } + } } impl Default for Version { - fn default() -> Self { - Version { - identifier: "1".to_string(), - features: vec![ - Order::Ordered.as_str().to_owned(), - Order::Unordered.as_str().to_owned(), - ], - } - } + fn default() -> Self { + Version { + identifier: "1".to_string(), + features: vec![ + Order::Ordered.as_str().to_owned(), + Order::Unordered.as_str().to_owned(), + ], + } + } } impl Display for Version { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "{}", self.identifier) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "{}", self.identifier) + } } /// Returns the lists of supported versions pub fn get_compatible_versions() -> Vec { - vec![Version::default()] + vec![Version::default()] } /// Selects a version from the intersection of locally supported and counterparty versions. 
pub fn pick_version( - supported_versions: Vec, - counterparty_versions: Vec, + supported_versions: Vec, + counterparty_versions: Vec, ) -> Result { - let mut intersection: Vec = Vec::new(); - for s in supported_versions.iter() { - for c in counterparty_versions.iter() { - if c.identifier != s.identifier { - continue; - } - for feature in c.features.iter() { - if feature.trim().is_empty() { - return Err(Error::empty_features()); - } - } - intersection.append(&mut vec![s.clone()]); - } - } - intersection.sort_by(|a, b| a.identifier.cmp(&b.identifier)); - if intersection.is_empty() { - return Err(Error::no_common_version()); - } - Ok(intersection[0].clone()) + let mut intersection: Vec = Vec::new(); + for s in supported_versions.iter() { + for c in counterparty_versions.iter() { + if c.identifier != s.identifier { + continue + } + for feature in c.features.iter() { + if feature.trim().is_empty() { + return Err(Error::empty_features()) + } + } + intersection.append(&mut vec![s.clone()]); + } + } + intersection.sort_by(|a, b| a.identifier.cmp(&b.identifier)); + if intersection.is_empty() { + return Err(Error::no_common_version()) + } + Ok(intersection[0].clone()) } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::connection::v1::Version as RawVersion; + use ibc_proto::ibc::core::connection::v1::Version as RawVersion; - use crate::core::ics03_connection::error::Error; - use crate::core::ics03_connection::version::{get_compatible_versions, pick_version, Version}; + use crate::core::ics03_connection::{ + error::Error, + version::{get_compatible_versions, pick_version, Version}, + }; - fn good_versions() -> Vec { - vec![ - Version::default().into(), - RawVersion { - identifier: "2".to_string(), - features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], - }, - ] - .into_iter() - .collect() - } + fn good_versions() -> Vec { + vec![ + Version::default().into(), 
+ RawVersion { + identifier: "2".to_string(), + features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], + }, + ] + .into_iter() + .collect() + } - fn bad_versions_identifier() -> Vec { - vec![RawVersion { - identifier: "".to_string(), - features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], - }] - .into_iter() - .collect() - } + fn bad_versions_identifier() -> Vec { + vec![RawVersion { + identifier: "".to_string(), + features: vec!["ORDER_RANDOM".to_string(), "ORDER_UNORDERED".to_string()], + }] + .into_iter() + .collect() + } - fn bad_versions_features() -> Vec { - vec![RawVersion { - identifier: "2".to_string(), - features: vec!["".to_string()], - }] - .into_iter() - .collect() - } + fn bad_versions_features() -> Vec { + vec![RawVersion { identifier: "2".to_string(), features: vec!["".to_string()] }] + .into_iter() + .collect() + } - fn overlapping() -> (Vec, Vec, Version) { - ( - vec![ - Version::default(), - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - Version { - identifier: "4".to_string(), - features: Vec::new(), - }, - ] - .into_iter() - .collect(), - vec![ - Version { - identifier: "2".to_string(), - features: Vec::new(), - }, - Version { - identifier: "4".to_string(), - features: Vec::new(), - }, - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - ] - .into_iter() - .collect(), - // Should pick version 3 as it's the lowest of the intersection {3, 4} - Version { - identifier: "3".to_string(), - features: Vec::new(), - }, - ) - } + fn overlapping() -> (Vec, Vec, Version) { + ( + vec![ + Version::default(), + Version { identifier: "3".to_string(), features: Vec::new() }, + Version { identifier: "4".to_string(), features: Vec::new() }, + ] + .into_iter() + .collect(), + vec![ + Version { identifier: "2".to_string(), features: Vec::new() }, + Version { identifier: "4".to_string(), features: Vec::new() }, + Version { identifier: "3".to_string(), features: Vec::new() }, + ] + 
.into_iter() + .collect(), + // Should pick version 3 as it's the lowest of the intersection {3, 4} + Version { identifier: "3".to_string(), features: Vec::new() }, + ) + } - fn disjoint() -> (Vec, Vec) { - ( - vec![Version { - identifier: "1".to_string(), - features: Vec::new(), - }] - .into_iter() - .collect(), - vec![Version { - identifier: "2".to_string(), - features: Vec::new(), - }] - .into_iter() - .collect(), - ) - } + fn disjoint() -> (Vec, Vec) { + ( + vec![Version { identifier: "1".to_string(), features: Vec::new() }] + .into_iter() + .collect(), + vec![Version { identifier: "2".to_string(), features: Vec::new() }] + .into_iter() + .collect(), + ) + } - #[test] - fn verify() { - struct Test { - name: String, - versions: Vec, - want_pass: bool, - } - let tests: Vec = vec![ - Test { - name: "Compatible versions".to_string(), - versions: vec![Version::default().into()], - want_pass: true, - }, - Test { - name: "Multiple versions".to_string(), - versions: good_versions(), - want_pass: true, - }, - Test { - name: "Bad version identifier".to_string(), - versions: bad_versions_identifier(), - want_pass: false, - }, - Test { - name: "Bad version feature".to_string(), - versions: bad_versions_features(), - want_pass: false, - }, - Test { - name: "Bad versions empty".to_string(), - versions: Vec::new(), - want_pass: true, - }, - ]; + #[test] + fn verify() { + struct Test { + name: String, + versions: Vec, + want_pass: bool, + } + let tests: Vec = vec![ + Test { + name: "Compatible versions".to_string(), + versions: vec![Version::default().into()], + want_pass: true, + }, + Test { + name: "Multiple versions".to_string(), + versions: good_versions(), + want_pass: true, + }, + Test { + name: "Bad version identifier".to_string(), + versions: bad_versions_identifier(), + want_pass: false, + }, + Test { + name: "Bad version feature".to_string(), + versions: bad_versions_features(), + want_pass: false, + }, + Test { name: "Bad versions empty".to_string(), versions: 
Vec::new(), want_pass: true }, + ]; - for test in tests { - let versions = test - .versions - .into_iter() - .map(Version::try_from) - .collect::, _>>(); + for test in tests { + let versions = + test.versions.into_iter().map(Version::try_from).collect::, _>>(); - assert_eq!( - test.want_pass, - versions.is_ok(), - "Validate versions failed for test {} with error {:?}", - test.name, - versions.err(), - ); - } - } - #[test] - fn pick() { - struct Test { - name: String, - supported: Vec, - counterparty: Vec, - picked: Result, - want_pass: bool, - } - let tests: Vec = vec![ - Test { - name: "Compatible versions".to_string(), - supported: get_compatible_versions(), - counterparty: get_compatible_versions(), - picked: Ok(Version::default()), - want_pass: true, - }, - Test { - name: "Overlapping versions".to_string(), - supported: overlapping().0, - counterparty: overlapping().1, - picked: Ok(overlapping().2), - want_pass: true, - }, - Test { - name: "Disjoint versions".to_string(), - supported: disjoint().0, - counterparty: disjoint().1, - picked: Err(Error::no_common_version()), - want_pass: false, - }, - ]; + assert_eq!( + test.want_pass, + versions.is_ok(), + "Validate versions failed for test {} with error {:?}", + test.name, + versions.err(), + ); + } + } + #[test] + fn pick() { + struct Test { + name: String, + supported: Vec, + counterparty: Vec, + picked: Result, + want_pass: bool, + } + let tests: Vec = vec![ + Test { + name: "Compatible versions".to_string(), + supported: get_compatible_versions(), + counterparty: get_compatible_versions(), + picked: Ok(Version::default()), + want_pass: true, + }, + Test { + name: "Overlapping versions".to_string(), + supported: overlapping().0, + counterparty: overlapping().1, + picked: Ok(overlapping().2), + want_pass: true, + }, + Test { + name: "Disjoint versions".to_string(), + supported: disjoint().0, + counterparty: disjoint().1, + picked: Err(Error::no_common_version()), + want_pass: false, + }, + ]; - for test in tests 
{ - let version = pick_version(test.supported, test.counterparty); + for test in tests { + let version = pick_version(test.supported, test.counterparty); - assert_eq!( - test.want_pass, - version.is_ok(), - "Validate versions failed for test {}", - test.name, - ); + assert_eq!( + test.want_pass, + version.is_ok(), + "Validate versions failed for test {}", + test.name, + ); - if test.want_pass { - assert_eq!(version.unwrap(), test.picked.unwrap()); - } - } - } - #[test] - fn serialize() { - let def = Version::default(); - let def_raw: RawVersion = def.clone().into(); - let def_back = def_raw.try_into().unwrap(); - assert_eq!(def, def_back); - } + if test.want_pass { + assert_eq!(version.unwrap(), test.picked.unwrap()); + } + } + } + #[test] + fn serialize() { + let def = Version::default(); + let def_raw: RawVersion = def.clone().into(); + let def_back = def_raw.try_into().unwrap(); + assert_eq!(def, def_back); + } } diff --git a/modules/src/core/ics04_channel/channel.rs b/modules/src/core/ics04_channel/channel.rs index 0e8087e291..08e025fda4 100644 --- a/modules/src/core/ics04_channel/channel.rs +++ b/modules/src/core/ics04_channel/channel.rs @@ -1,598 +1,544 @@ use crate::prelude::*; -use core::fmt; -use core::str::FromStr; +use core::{fmt, str::FromStr}; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::{ - Channel as RawChannel, Counterparty as RawCounterparty, - IdentifiedChannel as RawIdentifiedChannel, + Channel as RawChannel, Counterparty as RawCounterparty, + IdentifiedChannel as RawIdentifiedChannel, }; -use crate::core::ics02_client::height::Height; -use crate::core::ics04_channel::{error::Error, packet::Sequence, Version}; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::WithBlockDataType; +use crate::{ + core::{ + ics02_client::height::Height, + ics04_channel::{error::Error, packet::Sequence, Version}, + ics24_host::identifier::{ChannelId, 
ConnectionId, PortId}, + }, + events::WithBlockDataType, +}; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct IdentifiedChannelEnd { - pub port_id: PortId, - pub channel_id: ChannelId, - pub channel_end: ChannelEnd, + pub port_id: PortId, + pub channel_id: ChannelId, + pub channel_end: ChannelEnd, } impl IdentifiedChannelEnd { - pub fn new(port_id: PortId, channel_id: ChannelId, channel_end: ChannelEnd) -> Self { - IdentifiedChannelEnd { - port_id, - channel_id, - channel_end, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, channel_end: ChannelEnd) -> Self { + IdentifiedChannelEnd { port_id, channel_id, channel_end } + } } impl Protobuf for IdentifiedChannelEnd {} impl TryFrom for IdentifiedChannelEnd { - type Error = Error; - - fn try_from(value: RawIdentifiedChannel) -> Result { - let raw_channel_end = RawChannel { - state: value.state, - ordering: value.ordering, - counterparty: value.counterparty, - connection_hops: value.connection_hops, - version: value.version, - }; - - Ok(IdentifiedChannelEnd { - port_id: value.port_id.parse().map_err(Error::identifier)?, - channel_id: value.channel_id.parse().map_err(Error::identifier)?, - channel_end: raw_channel_end.try_into()?, - }) - } + type Error = Error; + + fn try_from(value: RawIdentifiedChannel) -> Result { + let raw_channel_end = RawChannel { + state: value.state, + ordering: value.ordering, + counterparty: value.counterparty, + connection_hops: value.connection_hops, + version: value.version, + }; + + Ok(IdentifiedChannelEnd { + port_id: value.port_id.parse().map_err(Error::identifier)?, + channel_id: value.channel_id.parse().map_err(Error::identifier)?, + channel_end: raw_channel_end.try_into()?, + }) + } } impl From for RawIdentifiedChannel { - fn from(value: IdentifiedChannelEnd) -> Self { - RawIdentifiedChannel { - state: value.channel_end.state as i32, - ordering: value.channel_end.ordering as i32, - counterparty: Some(value.channel_end.counterparty().clone().into()), 
- connection_hops: value - .channel_end - .connection_hops - .iter() - .map(|v| v.as_str().to_string()) - .collect(), - version: value.channel_end.version.to_string(), - port_id: value.port_id.to_string(), - channel_id: value.channel_id.to_string(), - } - } + fn from(value: IdentifiedChannelEnd) -> Self { + RawIdentifiedChannel { + state: value.channel_end.state as i32, + ordering: value.channel_end.ordering as i32, + counterparty: Some(value.channel_end.counterparty().clone().into()), + connection_hops: value + .channel_end + .connection_hops + .iter() + .map(|v| v.as_str().to_string()) + .collect(), + version: value.channel_end.version.to_string(), + port_id: value.port_id.to_string(), + channel_id: value.channel_id.to_string(), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct ChannelEnd { - pub state: State, - pub ordering: Order, - pub remote: Counterparty, - pub connection_hops: Vec, - pub version: Version, + pub state: State, + pub ordering: Order, + pub remote: Counterparty, + pub connection_hops: Vec, + pub version: Version, } impl Default for ChannelEnd { - fn default() -> Self { - ChannelEnd { - state: State::Uninitialized, - ordering: Default::default(), - remote: Counterparty::default(), - connection_hops: Vec::new(), - version: Version::default(), - } - } + fn default() -> Self { + ChannelEnd { + state: State::Uninitialized, + ordering: Default::default(), + remote: Counterparty::default(), + connection_hops: Vec::new(), + version: Version::default(), + } + } } impl Protobuf for ChannelEnd {} impl TryFrom for ChannelEnd { - type Error = Error; - - fn try_from(value: RawChannel) -> Result { - let chan_state: State = State::from_i32(value.state)?; - - if chan_state == State::Uninitialized { - return Ok(ChannelEnd::default()); - } - - let chan_ordering = Order::from_i32(value.ordering)?; - - // Assemble the 'remote' attribute of the Channel, which represents the Counterparty. 
- let remote = value - .counterparty - .ok_or_else(Error::missing_counterparty)? - .try_into()?; - - // Parse each item in connection_hops into a ConnectionId. - let connection_hops = value - .connection_hops - .into_iter() - .map(|conn_id| ConnectionId::from_str(conn_id.as_str())) - .collect::, _>>() - .map_err(Error::identifier)?; - - let version = value.version.into(); - - Ok(ChannelEnd::new( - chan_state, - chan_ordering, - remote, - connection_hops, - version, - )) - } + type Error = Error; + + fn try_from(value: RawChannel) -> Result { + let chan_state: State = State::from_i32(value.state)?; + + if chan_state == State::Uninitialized { + return Ok(ChannelEnd::default()) + } + + let chan_ordering = Order::from_i32(value.ordering)?; + + // Assemble the 'remote' attribute of the Channel, which represents the Counterparty. + let remote = value.counterparty.ok_or_else(Error::missing_counterparty)?.try_into()?; + + // Parse each item in connection_hops into a ConnectionId. + let connection_hops = value + .connection_hops + .into_iter() + .map(|conn_id| ConnectionId::from_str(conn_id.as_str())) + .collect::, _>>() + .map_err(Error::identifier)?; + + let version = value.version.into(); + + Ok(ChannelEnd::new(chan_state, chan_ordering, remote, connection_hops, version)) + } } impl From for RawChannel { - fn from(value: ChannelEnd) -> Self { - RawChannel { - state: value.state as i32, - ordering: value.ordering as i32, - counterparty: Some(value.counterparty().clone().into()), - connection_hops: value - .connection_hops - .iter() - .map(|v| v.as_str().to_string()) - .collect(), - version: value.version.to_string(), - } - } + fn from(value: ChannelEnd) -> Self { + RawChannel { + state: value.state as i32, + ordering: value.ordering as i32, + counterparty: Some(value.counterparty().clone().into()), + connection_hops: value.connection_hops.iter().map(|v| v.as_str().to_string()).collect(), + version: value.version.to_string(), + } + } } impl ChannelEnd { - /// Creates a new 
ChannelEnd in state Uninitialized and other fields parametrized. - pub fn new( - state: State, - ordering: Order, - remote: Counterparty, - connection_hops: Vec, - version: Version, - ) -> Self { - Self { - state, - ordering, - remote, - connection_hops, - version, - } - } - - /// Updates the ChannelEnd to assume a new State 's'. - pub fn set_state(&mut self, s: State) { - self.state = s; - } - - pub fn set_version(&mut self, v: Version) { - self.version = v; - } - - pub fn set_counterparty_channel_id(&mut self, c: ChannelId) { - self.remote.channel_id = Some(c); - } - - /// Returns `true` if this `ChannelEnd` is in state [`State::Open`]. - pub fn is_open(&self) -> bool { - self.state_matches(&State::Open) - } - - pub fn state(&self) -> &State { - &self.state - } - - pub fn ordering(&self) -> &Order { - &self.ordering - } - - pub fn counterparty(&self) -> &Counterparty { - &self.remote - } - - pub fn connection_hops(&self) -> &Vec { - &self.connection_hops - } - - pub fn version(&self) -> &Version { - &self.version - } - - pub fn validate_basic(&self) -> Result<(), Error> { - if self.connection_hops.len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - self.connection_hops.len(), - )); - } - self.counterparty().validate_basic() - } - - /// Helper function to compare the state of this end with another state. - pub fn state_matches(&self, other: &State) -> bool { - self.state.eq(other) - } - - /// Helper function to compare the order of this end with another order. 
- pub fn order_matches(&self, other: &Order) -> bool { - self.ordering.eq(other) - } - - #[allow(clippy::ptr_arg)] - pub fn connection_hops_matches(&self, other: &Vec) -> bool { - self.connection_hops.eq(other) - } - - pub fn counterparty_matches(&self, other: &Counterparty) -> bool { - self.counterparty().eq(other) - } - - pub fn version_matches(&self, other: &Version) -> bool { - self.version().eq(other) - } + /// Creates a new ChannelEnd in state Uninitialized and other fields parametrized. + pub fn new( + state: State, + ordering: Order, + remote: Counterparty, + connection_hops: Vec, + version: Version, + ) -> Self { + Self { state, ordering, remote, connection_hops, version } + } + + /// Updates the ChannelEnd to assume a new State 's'. + pub fn set_state(&mut self, s: State) { + self.state = s; + } + + pub fn set_version(&mut self, v: Version) { + self.version = v; + } + + pub fn set_counterparty_channel_id(&mut self, c: ChannelId) { + self.remote.channel_id = Some(c); + } + + /// Returns `true` if this `ChannelEnd` is in state [`State::Open`]. + pub fn is_open(&self) -> bool { + self.state_matches(&State::Open) + } + + pub fn state(&self) -> &State { + &self.state + } + + pub fn ordering(&self) -> &Order { + &self.ordering + } + + pub fn counterparty(&self) -> &Counterparty { + &self.remote + } + + pub fn connection_hops(&self) -> &Vec { + &self.connection_hops + } + + pub fn version(&self) -> &Version { + &self.version + } + + pub fn validate_basic(&self) -> Result<(), Error> { + if self.connection_hops.len() != 1 { + return Err(Error::invalid_connection_hops_length(1, self.connection_hops.len())) + } + self.counterparty().validate_basic() + } + + /// Helper function to compare the state of this end with another state. + pub fn state_matches(&self, other: &State) -> bool { + self.state.eq(other) + } + + /// Helper function to compare the order of this end with another order. 
+ pub fn order_matches(&self, other: &Order) -> bool { + self.ordering.eq(other) + } + + #[allow(clippy::ptr_arg)] + pub fn connection_hops_matches(&self, other: &Vec) -> bool { + self.connection_hops.eq(other) + } + + pub fn counterparty_matches(&self, other: &Counterparty) -> bool { + self.counterparty().eq(other) + } + + pub fn version_matches(&self, other: &Version) -> bool { + self.version().eq(other) + } } #[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct Counterparty { - pub port_id: PortId, - pub channel_id: Option, + pub port_id: PortId, + pub channel_id: Option, } impl Counterparty { - pub fn new(port_id: PortId, channel_id: Option) -> Self { - Self { - port_id, - channel_id, - } - } - - pub fn port_id(&self) -> &PortId { - &self.port_id - } - - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - - pub fn validate_basic(&self) -> Result<(), Error> { - Ok(()) - } + pub fn new(port_id: PortId, channel_id: Option) -> Self { + Self { port_id, channel_id } + } + + pub fn port_id(&self) -> &PortId { + &self.port_id + } + + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + + pub fn validate_basic(&self) -> Result<(), Error> { + Ok(()) + } } impl Protobuf for Counterparty {} impl TryFrom for Counterparty { - type Error = Error; - - fn try_from(value: RawCounterparty) -> Result { - let channel_id = Some(value.channel_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(Error::identifier)?; - Ok(Counterparty::new( - value.port_id.parse().map_err(Error::identifier)?, - channel_id, - )) - } + type Error = Error; + + fn try_from(value: RawCounterparty) -> Result { + let channel_id = Some(value.channel_id) + .filter(|x| !x.is_empty()) + .map(|v| FromStr::from_str(v.as_str())) + .transpose() + .map_err(Error::identifier)?; + Ok(Counterparty::new(value.port_id.parse().map_err(Error::identifier)?, channel_id)) + } } impl From for 
RawCounterparty { - fn from(value: Counterparty) -> Self { - RawCounterparty { - port_id: value.port_id.as_str().to_string(), - channel_id: value - .channel_id - .map_or_else(|| "".to_string(), |v| v.to_string()), - } - } + fn from(value: Counterparty) -> Self { + RawCounterparty { + port_id: value.port_id.as_str().to_string(), + channel_id: value.channel_id.map_or_else(|| "".to_string(), |v| v.to_string()), + } + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum Order { - None = 0, - Unordered = 1, - Ordered = 2, + None = 0, + Unordered = 1, + Ordered = 2, } impl Default for Order { - fn default() -> Self { - Order::Unordered - } + fn default() -> Self { + Order::Unordered + } } impl fmt::Display for Order { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.as_str()) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.as_str()) + } } impl Order { - /// Yields the Order as a string - pub fn as_str(&self) -> &'static str { - match self { - Self::None => "UNINITIALIZED", - Self::Unordered => "ORDER_UNORDERED", - Self::Ordered => "ORDER_ORDERED", - } - } - - // Parses the Order out from a i32. - pub fn from_i32(nr: i32) -> Result { - match nr { - 0 => Ok(Self::None), - 1 => Ok(Self::Unordered), - 2 => Ok(Self::Ordered), - _ => Err(Error::unknown_order_type(nr.to_string())), - } - } + /// Yields the Order as a string + pub fn as_str(&self) -> &'static str { + match self { + Self::None => "UNINITIALIZED", + Self::Unordered => "ORDER_UNORDERED", + Self::Ordered => "ORDER_ORDERED", + } + } + + // Parses the Order out from a i32. 
+ pub fn from_i32(nr: i32) -> Result { + match nr { + 0 => Ok(Self::None), + 1 => Ok(Self::Unordered), + 2 => Ok(Self::Ordered), + _ => Err(Error::unknown_order_type(nr.to_string())), + } + } } impl FromStr for Order { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s.to_lowercase().trim_start_matches("order_") { - "uninitialized" => Ok(Self::None), - "unordered" => Ok(Self::Unordered), - "ordered" => Ok(Self::Ordered), - _ => Err(Error::unknown_order_type(s.to_string())), - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().trim_start_matches("order_") { + "uninitialized" => Ok(Self::None), + "unordered" => Ok(Self::Unordered), + "ordered" => Ok(Self::Ordered), + _ => Err(Error::unknown_order_type(s.to_string())), + } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub enum State { - Uninitialized = 0, - Init = 1, - TryOpen = 2, - Open = 3, - Closed = 4, + Uninitialized = 0, + Init = 1, + TryOpen = 2, + Open = 3, + Closed = 4, } impl State { - /// Yields the state as a string - pub fn as_string(&self) -> &'static str { - match self { - Self::Uninitialized => "UNINITIALIZED", - Self::Init => "INIT", - Self::TryOpen => "TRYOPEN", - Self::Open => "OPEN", - Self::Closed => "CLOSED", - } - } - - // Parses the State out from a i32. - pub fn from_i32(s: i32) -> Result { - match s { - 0 => Ok(Self::Uninitialized), - 1 => Ok(Self::Init), - 2 => Ok(Self::TryOpen), - 3 => Ok(Self::Open), - 4 => Ok(Self::Closed), - _ => Err(Error::unknown_state(s)), - } - } - - /// Returns whether or not this channel state is `Open`. - pub fn is_open(self) -> bool { - self == State::Open - } - - /// Returns whether or not the channel with this state - /// has progressed less or the same than the argument. 
- /// - /// # Example - /// ```rust,ignore - /// assert!(State::Init.less_or_equal_progress(State::Open)); - /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); - /// assert!(!State::Closed.less_or_equal_progress(State::Open)); - /// ``` - pub fn less_or_equal_progress(self, other: Self) -> bool { - self as u32 <= other as u32 - } + /// Yields the state as a string + pub fn as_string(&self) -> &'static str { + match self { + Self::Uninitialized => "UNINITIALIZED", + Self::Init => "INIT", + Self::TryOpen => "TRYOPEN", + Self::Open => "OPEN", + Self::Closed => "CLOSED", + } + } + + // Parses the State out from a i32. + pub fn from_i32(s: i32) -> Result { + match s { + 0 => Ok(Self::Uninitialized), + 1 => Ok(Self::Init), + 2 => Ok(Self::TryOpen), + 3 => Ok(Self::Open), + 4 => Ok(Self::Closed), + _ => Err(Error::unknown_state(s)), + } + } + + /// Returns whether or not this channel state is `Open`. + pub fn is_open(self) -> bool { + self == State::Open + } + + /// Returns whether or not the channel with this state + /// has progressed less or the same than the argument. + /// + /// # Example + /// ```rust,ignore + /// assert!(State::Init.less_or_equal_progress(State::Open)); + /// assert!(State::TryOpen.less_or_equal_progress(State::TryOpen)); + /// assert!(!State::Closed.less_or_equal_progress(State::Open)); + /// ``` + pub fn less_or_equal_progress(self, other: Self) -> bool { + self as u32 <= other as u32 + } } /// Provides a `to_string` method. impl core::fmt::Display for State { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.as_string()) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.as_string()) + } } /// Used to query a packet event, identified by `event_id`, for specific channel and sequences. /// The query is preformed for the chain context at `height`. 
#[derive(Clone, Debug)] pub struct QueryPacketEventDataRequest { - pub event_id: WithBlockDataType, - pub source_channel_id: ChannelId, - pub source_port_id: PortId, - pub destination_channel_id: ChannelId, - pub destination_port_id: PortId, - pub sequences: Vec, - pub height: Height, + pub event_id: WithBlockDataType, + pub source_channel_id: ChannelId, + pub source_port_id: PortId, + pub destination_channel_id: ChannelId, + pub destination_port_id: PortId, + pub sequences: Vec, + pub height: Height, } #[cfg(test)] pub mod test_util { - use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; - use ibc_proto::ibc::core::channel::v1::Counterparty as RawCounterparty; - - /// Returns a dummy `RawCounterparty`, for testing only! - /// Can be optionally parametrized with a specific channel identifier. - pub fn get_dummy_raw_counterparty() -> RawCounterparty { - RawCounterparty { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - } - } - - /// Returns a dummy `RawChannel`, for testing only! - pub fn get_dummy_raw_channel_end() -> RawChannel { - RawChannel { - state: 1, - ordering: 1, - counterparty: Some(get_dummy_raw_counterparty()), - connection_hops: vec![ConnectionId::default().to_string()], - version: "ics20".to_string(), // The version is not validated. - } - } + use crate::{ + core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + prelude::*, + }; + use ibc_proto::ibc::core::channel::v1::{ + Channel as RawChannel, Counterparty as RawCounterparty, + }; + + /// Returns a dummy `RawCounterparty`, for testing only! + /// Can be optionally parametrized with a specific channel identifier. 
+ pub fn get_dummy_raw_counterparty() -> RawCounterparty { + RawCounterparty { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + } + } + + /// Returns a dummy `RawChannel`, for testing only! + pub fn get_dummy_raw_channel_end() -> RawChannel { + RawChannel { + state: 1, + ordering: 1, + counterparty: Some(get_dummy_raw_counterparty()), + connection_hops: vec![ConnectionId::default().to_string()], + version: "ics20".to_string(), // The version is not validated. + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - - use core::str::FromStr; - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; - - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics04_channel::channel::ChannelEnd; - - #[test] - fn channel_end_try_from_raw() { - let raw_channel_end = get_dummy_raw_channel_end(); - - let empty_raw_channel_end = RawChannel { - counterparty: None, - ..raw_channel_end.clone() - }; - - struct Test { - name: String, - params: RawChannel, - want_pass: bool, - } - - let tests: Vec = vec![ - Test { - name: "Raw channel end with missing counterparty".to_string(), - params: empty_raw_channel_end, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect state".to_string(), - params: RawChannel { - state: -1, - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect ordering".to_string(), - params: RawChannel { - ordering: -1, - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect connection id in connection hops".to_string(), - params: RawChannel { - connection_hops: vec!["connection*".to_string()].into_iter().collect(), - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with incorrect connection id (has blank space)".to_string(), - params: RawChannel { - connection_hops: vec!["con 
nection".to_string()].into_iter().collect(), - ..raw_channel_end.clone() - }, - want_pass: false, - }, - Test { - name: "Raw channel end with two correct connection ids in connection hops" - .to_string(), - params: RawChannel { - connection_hops: vec!["connection1".to_string(), "connection2".to_string()] - .into_iter() - .collect(), - ..raw_channel_end.clone() - }, - want_pass: true, - }, - Test { - name: "Raw channel end with correct params".to_string(), - params: raw_channel_end, - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let p = test.params.clone(); - - let ce_result = ChannelEnd::try_from(p); - - assert_eq!( - test.want_pass, - ce_result.is_ok(), - "ChannelEnd::try_from() failed for test {}, \nmsg{:?} with error {:?}", - test.name, - test.params.clone(), - ce_result.err(), - ); - } - } - - #[test] - fn parse_channel_ordering_type() { - use super::Order; - - struct Test { - ordering: &'static str, - want_res: Order, - want_err: bool, - } - let tests: Vec = vec![ - Test { - ordering: "UNINITIALIZED", - want_res: Order::None, - want_err: false, - }, - Test { - ordering: "UNORDERED", - want_res: Order::Unordered, - want_err: false, - }, - Test { - ordering: "ORDERED", - want_res: Order::Ordered, - want_err: false, - }, - Test { - ordering: "UNKNOWN_ORDER", - want_res: Order::None, - want_err: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - match Order::from_str(test.ordering) { - Ok(res) => { - assert!(!test.want_err); - assert_eq!(test.want_res, res); - } - Err(_) => assert!(test.want_err, "parse failed"), - } - } - } + use crate::prelude::*; + + use core::str::FromStr; + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::Channel as RawChannel; + + use crate::core::ics04_channel::channel::{test_util::get_dummy_raw_channel_end, ChannelEnd}; + + #[test] + fn channel_end_try_from_raw() { + let raw_channel_end = get_dummy_raw_channel_end(); + + let empty_raw_channel_end = RawChannel { 
counterparty: None, ..raw_channel_end.clone() }; + + struct Test { + name: String, + params: RawChannel, + want_pass: bool, + } + + let tests: Vec = vec![ + Test { + name: "Raw channel end with missing counterparty".to_string(), + params: empty_raw_channel_end, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect state".to_string(), + params: RawChannel { state: -1, ..raw_channel_end.clone() }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect ordering".to_string(), + params: RawChannel { ordering: -1, ..raw_channel_end.clone() }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect connection id in connection hops".to_string(), + params: RawChannel { + connection_hops: vec!["connection*".to_string()].into_iter().collect(), + ..raw_channel_end.clone() + }, + want_pass: false, + }, + Test { + name: "Raw channel end with incorrect connection id (has blank space)".to_string(), + params: RawChannel { + connection_hops: vec!["con nection".to_string()].into_iter().collect(), + ..raw_channel_end.clone() + }, + want_pass: false, + }, + Test { + name: "Raw channel end with two correct connection ids in connection hops" + .to_string(), + params: RawChannel { + connection_hops: vec!["connection1".to_string(), "connection2".to_string()] + .into_iter() + .collect(), + ..raw_channel_end.clone() + }, + want_pass: true, + }, + Test { + name: "Raw channel end with correct params".to_string(), + params: raw_channel_end, + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let p = test.params.clone(); + + let ce_result = ChannelEnd::try_from(p); + + assert_eq!( + test.want_pass, + ce_result.is_ok(), + "ChannelEnd::try_from() failed for test {}, \nmsg{:?} with error {:?}", + test.name, + test.params.clone(), + ce_result.err(), + ); + } + } + + #[test] + fn parse_channel_ordering_type() { + use super::Order; + + struct Test { + ordering: &'static str, + want_res: Order, + want_err: bool, + } 
+ let tests: Vec = vec![ + Test { ordering: "UNINITIALIZED", want_res: Order::None, want_err: false }, + Test { ordering: "UNORDERED", want_res: Order::Unordered, want_err: false }, + Test { ordering: "ORDERED", want_res: Order::Ordered, want_err: false }, + Test { ordering: "UNKNOWN_ORDER", want_res: Order::None, want_err: true }, + ] + .into_iter() + .collect(); + + for test in tests { + match Order::from_str(test.ordering) { + Ok(res) => { + assert!(!test.want_err); + assert_eq!(test.want_res, res); + }, + Err(_) => assert!(test.want_err, "parse failed"), + } + } + } } diff --git a/modules/src/core/ics04_channel/commitment.rs b/modules/src/core/ics04_channel/commitment.rs index 100a989627..7798ca9419 100644 --- a/modules/src/core/ics04_channel/commitment.rs +++ b/modules/src/core/ics04_channel/commitment.rs @@ -7,15 +7,15 @@ use serde_derive::{Deserialize, Serialize}; pub struct PacketCommitment(Vec); impl PacketCommitment { - pub fn into_vec(self) -> Vec { - self.0 - } + pub fn into_vec(self) -> Vec { + self.0 + } } impl From> for PacketCommitment { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> Self { + Self(bytes) + } } /// Acknowledgement commitment to be stored @@ -23,13 +23,13 @@ impl From> for PacketCommitment { pub struct AcknowledgementCommitment(Vec); impl AcknowledgementCommitment { - pub fn into_vec(self) -> Vec { - self.0 - } + pub fn into_vec(self) -> Vec { + self.0 + } } impl From> for AcknowledgementCommitment { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> Self { + Self(bytes) + } } diff --git a/modules/src/core/ics04_channel/context.rs b/modules/src/core/ics04_channel/context.rs index 2f42208af3..3a28b88320 100644 --- a/modules/src/core/ics04_channel/context.rs +++ b/modules/src/core/ics04_channel/context.rs @@ -1,302 +1,272 @@ //! ICS4 (channel) context. The two traits `ChannelReader ` and `ChannelKeeper` define //! 
the interface that any host chain must implement to be able to process any `ChannelMsg`. -//! use core::time::Duration; use num_traits::float::FloatCore; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::handler::recv_packet::RecvPacketResult; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; -use crate::core::ics04_channel::{error::Error, packet::Receipt}; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; - -use super::packet::{PacketResult, Sequence}; +use crate::{ + core::{ + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + error::Error, + handler::{recv_packet::RecvPacketResult, ChannelIdState, ChannelResult}, + msgs::acknowledgement::Acknowledgement, + packet::Receipt, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + prelude::*, + timestamp::Timestamp, + Height, +}; + +use super::packet::{Packet, PacketResult, Sequence}; /// A context supplying all the necessary read-only dependencies for processing any `ChannelMsg`. pub trait ChannelReader { - /// Returns the ChannelEnd for the given `port_id` and `chan_id`. - fn channel_end(&self, port_channel_id: &(PortId, ChannelId)) -> Result; - - /// Returns the ConnectionState for the given identifier `connection_id`. 
- fn connection_end(&self, connection_id: &ConnectionId) -> Result; - - fn connection_channels(&self, cid: &ConnectionId) -> Result, Error>; - - /// Returns the ClientState for the given identifier `client_id`. Necessary dependency towards - /// proof verification. - fn client_state(&self, client_id: &ClientId) -> Result; - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result; - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_next_sequence_recv( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_next_sequence_ack( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result; - - fn get_packet_commitment( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result; - - fn get_packet_receipt(&self, key: &(PortId, ChannelId, Sequence)) -> Result; - - fn get_packet_acknowledgement( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result; - - fn packet_commitment( - &self, - packet_data: Vec, - timeout_height: Height, - timeout_timestamp: Timestamp, - ) -> PacketCommitment { - let mut input = timeout_timestamp.nanoseconds().to_be_bytes().to_vec(); - let revision_number = timeout_height.revision_number.to_be_bytes(); - input.append(&mut revision_number.to_vec()); - let revision_height = timeout_height.revision_height.to_be_bytes(); - input.append(&mut revision_height.to_vec()); - let data = self.hash(packet_data); - input.append(&mut data.to_vec()); - self.hash(input).into() - } - - fn ack_commitment(&self, ack: Acknowledgement) -> AcknowledgementCommitment { - self.hash(ack.into_bytes()).into() - } - - /// A hashing function for packet commitments - fn hash(&self, value: Vec) -> Vec; - - /// Returns the current height of the local chain. - fn host_height(&self) -> Height; - - /// Returns the current timestamp of the local chain. 
- fn host_timestamp(&self) -> Timestamp { - let pending_consensus_state = self - .pending_host_consensus_state() - .expect("host must have pending consensus state"); - pending_consensus_state.timestamp() - } - - /// Returns the `ConsensusState` of the host (local) chain at a specific height. - fn host_consensus_state(&self, height: Height) -> Result; - - /// Returns the pending `ConsensusState` of the host (local) chain. - fn pending_host_consensus_state(&self) -> Result; - - /// Returns the time when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] - fn client_update_time(&self, client_id: &ClientId, height: Height) -> Result; - - /// Returns the height when the client state for the given [`ClientId`] was updated with a header for the given [`Height`] - fn client_update_height(&self, client_id: &ClientId, height: Height) -> Result; - - /// Returns a counter on the number of channel ids have been created thus far. - /// The value of this counter should increase only via method - /// `ChannelKeeper::increase_channel_counter`. - fn channel_counter(&self) -> Result; - - /// Returns the maximum expected time per block - fn max_expected_time_per_block(&self) -> Duration; - - /// Calculates the block delay period using the connection's delay period and the maximum - /// expected time per block. - fn block_delay(&self, delay_period_time: Duration) -> u64 { - calculate_block_delay(delay_period_time, self.max_expected_time_per_block()) - } + /// Returns the ChannelEnd for the given `port_id` and `chan_id`. 
+ fn channel_end(&self, port_channel_id: &(PortId, ChannelId)) -> Result; + + fn connection_channels(&self, cid: &ConnectionId) -> Result, Error>; + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_next_sequence_recv( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_next_sequence_ack( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result; + + fn get_packet_commitment( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result; + + fn get_packet_receipt(&self, key: &(PortId, ChannelId, Sequence)) -> Result; + + fn get_packet_acknowledgement( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result; + + fn packet_commitment( + &self, + packet_data: Vec, + timeout_height: Height, + timeout_timestamp: Timestamp, + ) -> PacketCommitment { + let mut input = timeout_timestamp.nanoseconds().to_be_bytes().to_vec(); + let revision_number = timeout_height.revision_number.to_be_bytes(); + input.append(&mut revision_number.to_vec()); + let revision_height = timeout_height.revision_height.to_be_bytes(); + input.append(&mut revision_height.to_vec()); + let data = self.hash(packet_data); + input.append(&mut data.to_vec()); + self.hash(input).into() + } + + fn ack_commitment(&self, ack: Acknowledgement) -> AcknowledgementCommitment { + self.hash(ack.into_bytes()).into() + } + + /// A Sha2_256 hashing function + fn hash(&self, value: Vec) -> Vec; + + /// Returns the time when the client state for the given [`ClientId`] was updated with a header + /// for the given [`Height`] + fn client_update_time(&self, client_id: &ClientId, height: Height) -> Result; + + /// Returns the height when the client state for the given [`ClientId`] was updated with a + /// header for the given [`Height`] + fn client_update_height(&self, client_id: &ClientId, height: Height) -> Result; + + /// Returns a counter on the number of channel ids have been created thus far. 
+ /// The value of this counter should increase only via method + /// `ChannelKeeper::increase_channel_counter`. + fn channel_counter(&self) -> Result; + + /// Returns the maximum expected time per block + fn max_expected_time_per_block(&self) -> Duration; + + /// Calculates the block delay period using the connection's delay period and the maximum + /// expected time per block. + fn block_delay(&self, delay_period_time: Duration) -> u64 { + calculate_block_delay(delay_period_time, self.max_expected_time_per_block()) + } } /// A context supplying all the necessary write-only dependencies (i.e., storage writing facility) /// for processing any `ChannelMsg`. pub trait ChannelKeeper { - fn store_channel_result(&mut self, result: ChannelResult) -> Result<(), Error> { - // The handler processed this channel & some modifications occurred, store the new end. - self.store_channel( - (result.port_id.clone(), result.channel_id), - &result.channel_end, - )?; - - // The channel identifier was freshly brewed. - // Increase counter & initialize seq. nrs. - if matches!(result.channel_id_state, ChannelIdState::Generated) { - self.increase_channel_counter(); - - // Associate also the channel end to its connection. - self.store_connection_channels( - result.channel_end.connection_hops()[0].clone(), - &(result.port_id.clone(), result.channel_id), - )?; - - // Initialize send, recv, and ack sequence numbers. 
- self.store_next_sequence_send((result.port_id.clone(), result.channel_id), 1.into())?; - self.store_next_sequence_recv((result.port_id.clone(), result.channel_id), 1.into())?; - self.store_next_sequence_ack((result.port_id, result.channel_id), 1.into())?; - } - - Ok(()) - } - - fn store_packet_result(&mut self, general_result: PacketResult) -> Result<(), Error> { - match general_result { - PacketResult::Send(res) => { - self.store_next_sequence_send( - (res.port_id.clone(), res.channel_id), - res.seq_number, - )?; - - self.store_packet_commitment( - (res.port_id.clone(), res.channel_id, res.seq), - res.commitment, - )?; - } - PacketResult::Recv(res) => { - let res = match res { - RecvPacketResult::Success(res) => res, - RecvPacketResult::NoOp => unreachable!(), - }; - match res.receipt { - None => { - // Ordered channel - self.store_next_sequence_recv( - (res.port_id.clone(), res.channel_id), - res.seq_number, - )? - } - Some(r) => { - // Unordered channel - self.store_packet_receipt( - (res.port_id.clone(), res.channel_id, res.seq), - r, - )? 
- } - } - } - PacketResult::WriteAck(res) => { - self.store_packet_acknowledgement( - (res.port_id.clone(), res.channel_id, res.seq), - res.ack_commitment, - )?; - } - PacketResult::Ack(res) => { - match res.seq_number { - Some(s) => { - //Ordered Channel - self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; - } - None => { - //Unordered Channel - self.delete_packet_commitment(( - res.port_id.clone(), - res.channel_id, - res.seq, - ))?; - } - } - } - PacketResult::Timeout(res) => { - if let Some(c) = res.channel { - //Ordered Channel - self.store_channel((res.port_id.clone(), res.channel_id), &c)?; - } - self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; - } - } - Ok(()) - } - - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Error>; - - fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) - -> Result<(), Error>; - - fn store_packet_receipt( - &mut self, - key: (PortId, ChannelId, Sequence), - receipt: Receipt, - ) -> Result<(), Error>; - - fn store_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Error>; - - fn delete_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error>; - - fn store_connection_channels( - &mut self, - conn_id: ConnectionId, - port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Error>; - - /// Stores the given channel_end at a path associated with the port_id and channel_id. 
- fn store_channel( - &mut self, - port_channel_id: (PortId, ChannelId), - channel_end: &ChannelEnd, - ) -> Result<(), Error>; - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - fn store_next_sequence_recv( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - fn store_next_sequence_ack( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error>; - - /// Called upon channel identifier creation (Init or Try message processing). - /// Increases the counter which keeps track of how many channels have been created. - /// Should never fail. - fn increase_channel_counter(&mut self); + fn store_channel_result(&mut self, result: ChannelResult) -> Result<(), Error> { + // The handler processed this channel & some modifications occurred, store the new end. + self.store_channel((result.port_id.clone(), result.channel_id), &result.channel_end)?; + + // The channel identifier was freshly brewed. + // Increase counter & initialize seq. nrs. + if matches!(result.channel_id_state, ChannelIdState::Generated) { + self.increase_channel_counter(); + + // Associate also the channel end to its connection. + self.store_connection_channels( + result.channel_end.connection_hops()[0].clone(), + &(result.port_id.clone(), result.channel_id), + )?; + + // Initialize send, recv, and ack sequence numbers. 
+ self.store_next_sequence_send((result.port_id.clone(), result.channel_id), 1.into())?; + self.store_next_sequence_recv((result.port_id.clone(), result.channel_id), 1.into())?; + self.store_next_sequence_ack((result.port_id, result.channel_id), 1.into())?; + } + + Ok(()) + } + + fn store_packet_result(&mut self, general_result: PacketResult) -> Result<(), Error> { + match general_result { + PacketResult::Send(res) => { + self.store_next_sequence_send( + (res.port_id.clone(), res.channel_id), + res.seq_number, + )?; + + self.store_packet_commitment( + (res.port_id.clone(), res.channel_id, res.seq), + res.commitment, + )?; + + self.store_send_packet((res.port_id.clone(), res.channel_id, res.seq), res.packet)?; + }, + PacketResult::Recv(res) => match res { + RecvPacketResult::Ordered { port_id, channel_id, next_seq_recv, packet } => { + self.store_next_sequence_recv((port_id.clone(), channel_id), next_seq_recv)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? + }, + RecvPacketResult::Unordered { port_id, channel_id, sequence, receipt, packet } => { + self.store_packet_receipt((port_id.clone(), channel_id, sequence), receipt)?; + self.store_recv_packet((port_id, channel_id, packet.sequence), packet)? 
+ }, + + RecvPacketResult::NoOp => unreachable!(), + }, + PacketResult::WriteAck(res) => { + self.store_packet_acknowledgement( + (res.port_id.clone(), res.channel_id, res.seq), + res.ack_commitment, + )?; + }, + PacketResult::Ack(res) => { + if let Some(s) = res.seq_number { + //Ordered Channel + self.store_next_sequence_ack((res.port_id.clone(), res.channel_id), s)?; + } + + // Delete packet commitment since packet has been aknowledged + self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; + }, + PacketResult::Timeout(res) => { + if let Some(c) = res.channel { + //Ordered Channel + self.store_channel((res.port_id.clone(), res.channel_id), &c)?; + } + self.delete_packet_commitment((res.port_id.clone(), res.channel_id, res.seq))?; + }, + } + Ok(()) + } + + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Error>; + + /// Allow implementers to optionally store send packets in storage + fn store_send_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + + /// Allow implementers to optionally store received packets in storage + fn store_recv_packet( + &mut self, + key: (PortId, ChannelId, Sequence), + packet: Packet, + ) -> Result<(), Error>; + + fn delete_packet_commitment(&mut self, key: (PortId, ChannelId, Sequence)) + -> Result<(), Error>; + + fn store_packet_receipt( + &mut self, + key: (PortId, ChannelId, Sequence), + receipt: Receipt, + ) -> Result<(), Error>; + + fn store_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ack_commitment: AcknowledgementCommitment, + ) -> Result<(), Error>; + + fn delete_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error>; + + fn store_connection_channels( + &mut self, + conn_id: ConnectionId, + port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error>; + + /// Stores the given channel_end at a 
path associated with the port_id and channel_id. + fn store_channel( + &mut self, + port_channel_id: (PortId, ChannelId), + channel_end: &ChannelEnd, + ) -> Result<(), Error>; + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + fn store_next_sequence_recv( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + fn store_next_sequence_ack( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error>; + + /// Called upon channel identifier creation (Init or Try message processing). + /// Increases the counter which keeps track of how many channels have been created. + /// Should never fail. + fn increase_channel_counter(&mut self); } pub fn calculate_block_delay( - delay_period_time: Duration, - max_expected_time_per_block: Duration, + delay_period_time: Duration, + max_expected_time_per_block: Duration, ) -> u64 { - if max_expected_time_per_block.is_zero() { - return 0; - } + if max_expected_time_per_block.is_zero() { + return 0 + } - FloatCore::ceil(delay_period_time.as_secs_f64() / max_expected_time_per_block.as_secs_f64()) - as u64 + FloatCore::ceil(delay_period_time.as_secs_f64() / max_expected_time_per_block.as_secs_f64()) + as u64 } diff --git a/modules/src/core/ics04_channel/error.rs b/modules/src/core/ics04_channel/error.rs index 18bf45aa97..7b1c1bc3ee 100644 --- a/modules/src/core/ics04_channel/error.rs +++ b/modules/src/core/ics04_channel/error.rs @@ -1,357 +1,381 @@ use super::packet::Sequence; -use crate::core::ics02_client::error as client_error; -use crate::core::ics03_connection::error as connection_error; -use crate::core::ics04_channel::channel::State; -use crate::core::ics05_port::error as port_error; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::prelude::*; -use 
crate::proofs::ProofError; -use crate::signer::SignerError; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::{ + ics02_client::error as client_error, + ics03_connection::error as connection_error, + ics04_channel::channel::State, + ics05_port::error as port_error, + ics24_host::{ + error::ValidationError, + identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + }, + prelude::*, + proofs::ProofError, + signer::SignerError, + timestamp::Timestamp, + Height, +}; use flex_error::{define_error, TraceError}; use tendermint_proto::Error as TendermintError; define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - Ics03Connection - [ connection_error::Error ] - | _ | { "ics03 connection error" }, - - Ics05Port - [ port_error::Error ] - | _ | { "ics05 port error" }, - - UnknownState - { state: i32 } - | e | { format_args!("channel state unknown: {}", e.state) }, - - Identifier - [ ValidationError ] - | _ | { "identifier error" }, - - UnknownOrderType - { type_id: String } - | e | { format_args!("channel order type unknown: {}", e.type_id) }, - - InvalidConnectionHopsLength - { expected: usize, actual: usize } - | e | { - format_args!( - "invalid connection hops length: expected {0}; actual {1}", - e.expected, e.actual) - }, - - InvalidPacketCounterparty - { port_id: PortId, channel_id: ChannelId } - | e | { - format_args!( - "packet destination port {} and channel {} doesn't match the counterparty's port/channel", - e.port_id, e.channel_id) - }, - - InvalidVersion - [ TraceError ] - | _ | { "invalid version" }, - - Signer - [ SignerError ] - | _ | { "invalid signer address" }, - - InvalidProof - [ ProofError ] - | _ | { "invalid proof" }, - - MissingHeight - | _ | { "invalid proof: missing height" }, - - MissingNextRecvSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for receiving packets on port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - 
ZeroPacketSequence - | _ | { "packet sequence cannot be 0" }, - - ZeroPacketData - | _ | { "packet data bytes cannot be empty" }, - - ZeroPacketTimeout - | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, - - InvalidTimeoutHeight - | _ | { "invalid timeout height for the packet" }, - - InvalidPacket - | _ | { "invalid packet" }, - - MissingPacket - | _ | { "there is no packet in this message" }, - - MissingChannelId - | _ | { "missing channel id" }, - - MissingCounterparty - | _ | { "missing counterparty" }, - - NoCommonVersion - | _ | { "no commong version" }, - - MissingChannel - | _ | { "missing channel end" }, - - InvalidVersionLengthConnection - | _ | { "single version must be negociated on connection before opening channel" }, - - ChannelFeatureNotSuportedByConnection - | _ | { "the channel ordering is not supported by connection" }, - - ChannelNotFound - { port_id: PortId, channel_id: ChannelId } - | e | { - format_args!( - "the channel end ({0}, {1}) does not exist", - e.port_id, e.channel_id) - }, - - ChannelMismatch - { channel_id: ChannelId } - | e | { - format_args!( - "a different channel exists (was initialized) already for the same channel identifier {0}", - e.channel_id) - }, - - ConnectionNotOpen - { connection_id: ConnectionId } - | e | { - format_args!( - "the associated connection {0} is not OPEN", - e.connection_id) - }, - - UndefinedConnectionCounterparty - { connection_id: ConnectionId } - | e | { - format_args!( - "Undefined counterparty connection for {0}", - e.connection_id) - }, - - PacketVerificationFailed - { sequence: Sequence } - [ client_error::Error ] - | e | { - format_args!( - "Verification fails for the packet with the sequence number {0}", - e.sequence) - }, - - VerifyChannelFailed - [ client_error::Error ] - | _ | { - "Error verifying channel state" - }, - - InvalidAcknowledgement - | _ | { "Acknowledgment cannot be empty" }, - - AcknowledgementExists - { sequence: Sequence } - | e | { - 
format_args!( - "Packet acknowledgement exists for the packet with the sequence {0}", - e.sequence) - }, - - MissingNextSendSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for sending packets on port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - InvalidStringAsSequence - { value: String } - [ TraceError ] - | e | { - format_args!( - "String {0} cannot be converted to packet sequence", - e.value) - }, - - InvalidPacketSequence - { - given_sequence: Sequence, - next_sequence: Sequence - } - | e | { - format_args!( - "Invalid packet sequence {0} ≠ next send sequence {1}", - e.given_sequence, e.next_sequence) - }, - - LowPacketHeight - { - chain_height: Height, - timeout_height: Height - } - | e | { - format_args!( - "Receiving chain block height {0} >= packet timeout height {1}", - e.chain_height, e.timeout_height) - }, - - PacketTimeoutHeightNotReached - { - timeout_height: Height, - chain_height: Height, - } - | e | { - format_args!( - "Packet timeout height {0} > chain height {1}", - e.timeout_height, e.chain_height) - }, - - PacketTimeoutTimestampNotReached - { - timeout_timestamp: Timestamp, - chain_timestamp: Timestamp, - } - | e | { - format_args!( - "Packet timeout timestamp {0} > chain timestamp {1}", - e.timeout_timestamp, e.chain_timestamp) - }, - - LowPacketTimestamp - | _ | { "Receiving chain block timestamp >= packet timeout timestamp" }, - - InvalidPacketTimestamp - [ crate::timestamp::ParseTimestampError ] - | _ | { "Invalid packet timeout timestamp value" }, - - ErrorInvalidConsensusState - | _ | { "Invalid timestamp in consensus state; timestamp must be a positive value" }, - - FrozenClient - { client_id: ClientId } - | e | { - format_args!( - "Client with id {0} is frozen", - e.client_id) - }, - - InvalidCounterpartyChannelId - [ ValidationError ] - | _ | { "Invalid channel id in counterparty" }, - - InvalidChannelState - { channel_id: ChannelId, state: State } - | e | { 
- format_args!( - "Channel {0} should not be state {1}", - e.channel_id, e.state) - }, - - ChannelClosed - { channel_id: ChannelId } - | e | { - format_args!( - "Channel {0} is Closed", - e.channel_id) - }, - - ChanOpenAckProofVerification - | _ | { "Handshake proof verification fails at ChannelOpenAck" }, - - PacketCommitmentNotFound - { sequence: Sequence } - | e | { - format_args!( - "Commitment for the packet {0} not found", - e.sequence) - }, - - IncorrectPacketCommitment - { sequence: Sequence } - | e | { - format_args!( - "The stored commitment of the packet {0} is incorrect", - e.sequence) - }, - - PacketReceiptNotFound - { sequence: Sequence } - | e | { - format_args!( - "Receipt for the packet {0} not found", - e.sequence) - }, - - PacketAcknowledgementNotFound - { sequence: Sequence } - | e | { - format_args!( - "Acknowledgment for the packet {0} not found", - e.sequence) - }, - - MissingNextAckSeq - { port_channel_id: (PortId, ChannelId) } - | e | { - format_args!("Missing sequence number for ack packets on port {0} and channel {1}", - e.port_channel_id.0, - e.port_channel_id.1) - }, - - ProcessedTimeNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed time for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - ProcessedHeightNotFound - { - client_id: ClientId, - height: Height, - } - | e | { - format_args!( - "Processed height for the client {0} at height {1} not found", - e.client_id, e.height) - }, - - RouteNotFound - | _ | { "route not found" }, - - ImplementationSpecific - | _ | { "implementation specific error" }, - - AppModule - { description: String } - | e | { - format_args!( - "application module error: {0}", - e.description) - }, - } + #[derive(Debug, PartialEq, Eq)] + Error { + Ics03Connection + [ connection_error::Error ] + | _ | { "ics03 connection error" }, + + Ics02Client + [ client_error::Error ] + | _ | { "ics02 client error" }, + + Ics05Port + [ port_error::Error ] + 
| _ | { "ics05 port error" }, + + UnknownState + { state: i32 } + | e | { format_args!("channel state unknown: {}", e.state) }, + + Identifier + [ ValidationError ] + | _ | { "identifier error" }, + + UnknownOrderType + { type_id: String } + | e | { format_args!("channel order type unknown: {}", e.type_id) }, + + InvalidConnectionHopsLength + { expected: usize, actual: usize } + | e | { + format_args!( + "invalid connection hops length: expected {0}; actual {1}", + e.expected, e.actual) + }, + + InvalidPacketCounterparty + { port_id: PortId, channel_id: ChannelId } + | e | { + format_args!( + "packet destination port {} and channel {} doesn't match the counterparty's port/channel", + e.port_id, e.channel_id) + }, + + InvalidVersion + [ TraceError ] + | _ | { "invalid version" }, + + Signer + [ SignerError ] + | _ | { "invalid signer address" }, + + InvalidProof + [ ProofError ] + | _ | { "invalid proof" }, + + MissingHeight + | _ | { "invalid proof: missing height" }, + + MissingChannelProof + | _ | { "invalid proof: missing channel proof" }, + + MissingNextRecvSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for receiving packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + ZeroPacketSequence + | _ | { "packet sequence cannot be 0" }, + + ZeroPacketData + | _ | { "packet data bytes cannot be empty" }, + + ZeroPacketTimeout + | _ | { "packet timeout height and packet timeout timestamp cannot both be 0" }, + + InvalidTimeoutHeight + | _ | { "invalid timeout height for the packet" }, + + InvalidPacket + | _ | { "invalid packet" }, + + MissingPacket + | _ | { "there is no packet in this message" }, + + MissingChannelId + | _ | { "missing channel id" }, + + MissingCounterparty + | _ | { "missing counterparty" }, + + NoCommonVersion + | _ | { "no commong version" }, + + MissingChannel + | _ | { "missing channel end" }, + + InvalidVersionLengthConnection + | _ | { "single version must 
be negociated on connection before opening channel" }, + + ChannelFeatureNotSuportedByConnection + | _ | { "the channel ordering is not supported by connection" }, + + ChannelNotFound + { port_id: PortId, channel_id: ChannelId } + | e | { + format_args!( + "the channel end ({0}, {1}) does not exist", + e.port_id, e.channel_id) + }, + + ChannelMismatch + { channel_id: ChannelId } + | e | { + format_args!( + "a different channel exists (was initialized) already for the same channel identifier {0}", + e.channel_id) + }, + + ConnectionNotOpen + { connection_id: ConnectionId } + | e | { + format_args!( + "the associated connection {0} is not OPEN", + e.connection_id) + }, + + UndefinedConnectionCounterparty + { connection_id: ConnectionId } + | e | { + format_args!( + "Undefined counterparty connection for {0}", + e.connection_id) + }, + + PacketVerificationFailed + { sequence: Sequence } + [ client_error::Error ] + | e | { + format_args!( + "Verification fails for the packet with the sequence number {0}", + e.sequence) + }, + + VerifyChannelFailed + [ client_error::Error ] + | _ | { + "Error verifying channel state" + }, + + InvalidAcknowledgement + | _ | { "Acknowledgment cannot be empty" }, + + AcknowledgementExists + { sequence: Sequence } + | e | { + format_args!( + "Packet acknowledgement exists for the packet with the sequence {0}", + e.sequence) + }, + + MissingNextSendSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for sending packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + InvalidStringAsSequence + { value: String } + [ TraceError ] + | e | { + format_args!( + "String {0} cannot be converted to packet sequence", + e.value) + }, + + InvalidPacketSequence + { + given_sequence: Sequence, + next_sequence: Sequence + } + | e | { + format_args!( + "Invalid packet sequence {0} ≠ next send sequence {1}", + e.given_sequence, e.next_sequence) + }, + + LowPacketHeight + { + 
chain_height: Height, + timeout_height: Height + } + | e | { + format_args!( + "Receiving chain block height {0} >= packet timeout height {1}", + e.chain_height, e.timeout_height) + }, + PacketTimeoutNotReached + { + timeout_height: Height, + chain_height: Height, + timeout_timestamp: Timestamp, + chain_timestamp: Timestamp, + } + | e | { format_args!( + "Packet timeout not satisified for either packet height or timestamp, Packet timeout height {0}, chain height {1}, Packet timeout timestamp {2}, chain timestamp {3}", + e.timeout_height, e.chain_height, e.timeout_timestamp, e.chain_timestamp) }, + + PacketTimeoutHeightNotReached + { + timeout_height: Height, + chain_height: Height, + } + | e | { + format_args!( + "Packet timeout height {0} > chain height {1}", + e.timeout_height, e.chain_height) + }, + + PacketTimeoutTimestampNotReached + { + timeout_timestamp: Timestamp, + chain_timestamp: Timestamp, + } + | e | { + format_args!( + "Packet timeout timestamp {0} > chain timestamp {1}", + e.timeout_timestamp, e.chain_timestamp) + }, + + LowPacketTimestamp + | _ | { "Receiving chain block timestamp >= packet timeout timestamp" }, + + InvalidPacketTimestamp + [ crate::timestamp::ParseTimestampError ] + | _ | { "Invalid packet timeout timestamp value" }, + + ErrorInvalidConsensusState + | _ | { "Invalid timestamp in consensus state; timestamp must be a positive value" }, + + FrozenClient + { client_id: ClientId } + | e | { + format_args!( + "Client with id {0} is frozen", + e.client_id) + }, + + InvalidCounterpartyChannelId + [ ValidationError ] + | _ | { "Invalid channel id in counterparty" }, + + InvalidChannelState + { channel_id: ChannelId, state: State } + | e | { + format_args!( + "Channel {0} should not be state {1}", + e.channel_id, e.state) + }, + + ChannelClosed + { channel_id: ChannelId } + | e | { + format_args!( + "Channel {0} is Closed", + e.channel_id) + }, + + ChanOpenAckProofVerification + | _ | { "Handshake proof verification fails at ChannelOpenAck" 
}, + + PacketCommitmentNotFound + { sequence: Sequence } + | e | { + format_args!( + "Commitment for the packet {0} not found", + e.sequence) + }, + + IncorrectPacketCommitment + { sequence: Sequence } + | e | { + format_args!( + "The stored commitment of the packet {0} is incorrect", + e.sequence) + }, + + PacketReceiptNotFound + { sequence: Sequence } + | e | { + format_args!( + "Receipt for the packet {0} not found", + e.sequence) + }, + + PacketAcknowledgementNotFound + { sequence: Sequence } + | e | { + format_args!( + "Acknowledgment for the packet {0} not found", + e.sequence) + }, + + MissingNextAckSeq + { port_channel_id: (PortId, ChannelId) } + | e | { + format_args!("Missing sequence number for ack packets on port {0} and channel {1}", + e.port_channel_id.0, + e.port_channel_id.1) + }, + + ProcessedTimeNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed time for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + ProcessedHeightNotFound + { + client_id: ClientId, + height: Height, + } + | e | { + format_args!( + "Processed height for the client {0} at height {1} not found", + e.client_id, e.height) + }, + + RouteNotFound + | _ | { "route not found" }, + + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + + AppModule + { description: String } + | e | { + format_args!( + "application module error: {0}", + e.description) + }, + } } impl Error { - pub fn chan_open_confirm_proof_verification(e: Error) -> Error { - e.add_trace(&"Handshake proof verification fails at ChannelOpenConfirm") - } + pub fn chan_open_confirm_proof_verification(e: Error) -> Error { + e.add_trace(&"Handshake proof verification fails at ChannelOpenConfirm") + } } diff --git a/modules/src/core/ics04_channel/events.rs b/modules/src/core/ics04_channel/events.rs index 35b4865ef6..07a7aa7166 100644 --- a/modules/src/core/ics04_channel/events.rs +++ 
b/modules/src/core/ics04_channel/events.rs @@ -1,18 +1,20 @@ //! Types for the IBC events emitted from Tendermint Websocket by the channels module. use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; -use tendermint::abci::Event as AbciEvent; - -use crate::core::ics02_client::height::Height; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::{ - extract_attribute, maybe_extract_attribute, Error as EventError, IbcEvent, IbcEventType, - RawObject, +use tendermint::abci::{Event as AbciEvent, EventAttribute}; + +use crate::{ + core::{ + ics02_client::height::Height, + ics04_channel::{error::Error, packet::Packet}, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + }, + events::{ + extract_attribute, maybe_extract_attribute, Error as EventError, IbcEvent, IbcEventType, + RawObject, + }, + prelude::*, }; -use crate::prelude::*; /// Channel event attribute keys const HEIGHT_ATTRIBUTE_KEY: &str = "height"; @@ -34,202 +36,195 @@ const PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY: &str = "packet_timeout_timestamp"; const PKT_ACK_ATTRIBUTE_KEY: &str = "packet_ack"; pub fn try_from_tx(event: &tendermint::abci::Event) -> Option { - match event.type_str.parse() { - Ok(IbcEventType::OpenInitChannel) => extract_attributes_from_tx(event) - .map(OpenInit::try_from) - .map(|res| res.ok().map(IbcEvent::OpenInitChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenTryChannel) => extract_attributes_from_tx(event) - .map(OpenTry::try_from) - .map(|res| res.ok().map(IbcEvent::OpenTryChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenAckChannel) => extract_attributes_from_tx(event) - .map(OpenAck::try_from) - .map(|res| res.ok().map(IbcEvent::OpenAckChannel)) - .ok() - .flatten(), - Ok(IbcEventType::OpenConfirmChannel) => extract_attributes_from_tx(event) - .map(OpenConfirm::try_from) - .map(|res| 
res.ok().map(IbcEvent::OpenConfirmChannel)) - .ok() - .flatten(), - Ok(IbcEventType::CloseInitChannel) => extract_attributes_from_tx(event) - .map(CloseInit::try_from) - .map(|res| res.ok().map(IbcEvent::CloseInitChannel)) - .ok() - .flatten(), - Ok(IbcEventType::CloseConfirmChannel) => extract_attributes_from_tx(event) - .map(CloseConfirm::try_from) - .map(|res| res.ok().map(IbcEvent::CloseConfirmChannel)) - .ok() - .flatten(), - Ok(IbcEventType::SendPacket) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. - debug_assert_eq!(write_ack.len(), 0); - IbcEvent::SendPacket(SendPacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - Ok(IbcEventType::WriteAck) => extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - IbcEvent::WriteAcknowledgement(WriteAcknowledgement { - height: Default::default(), - packet, - ack: write_ack, - }) - }) - .ok(), - Ok(IbcEventType::AckPacket) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. - debug_assert_eq!(write_ack.len(), 0); - IbcEvent::AcknowledgePacket(AcknowledgePacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - Ok(IbcEventType::Timeout) => { - extract_packet_and_write_ack_from_tx(event) - .map(|(packet, write_ack)| { - // This event should not have a write ack. 
- debug_assert_eq!(write_ack.len(), 0); - IbcEvent::TimeoutPacket(TimeoutPacket { - height: Default::default(), - packet, - }) - }) - .ok() - } - _ => None, - } + match event.kind.parse() { + Ok(IbcEventType::OpenInitChannel) => extract_attributes_from_tx(event) + .map(OpenInit::try_from) + .map(|res| res.ok().map(IbcEvent::OpenInitChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenTryChannel) => extract_attributes_from_tx(event) + .map(OpenTry::try_from) + .map(|res| res.ok().map(IbcEvent::OpenTryChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenAckChannel) => extract_attributes_from_tx(event) + .map(OpenAck::try_from) + .map(|res| res.ok().map(IbcEvent::OpenAckChannel)) + .ok() + .flatten(), + Ok(IbcEventType::OpenConfirmChannel) => extract_attributes_from_tx(event) + .map(OpenConfirm::try_from) + .map(|res| res.ok().map(IbcEvent::OpenConfirmChannel)) + .ok() + .flatten(), + Ok(IbcEventType::CloseInitChannel) => extract_attributes_from_tx(event) + .map(CloseInit::try_from) + .map(|res| res.ok().map(IbcEvent::CloseInitChannel)) + .ok() + .flatten(), + Ok(IbcEventType::CloseConfirmChannel) => extract_attributes_from_tx(event) + .map(CloseConfirm::try_from) + .map(|res| res.ok().map(IbcEvent::CloseConfirmChannel)) + .ok() + .flatten(), + Ok(IbcEventType::SendPacket) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. + debug_assert_eq!(write_ack.len(), 0); + IbcEvent::SendPacket(SendPacket { height: Default::default(), packet }) + }) + .ok() + }, + Ok(IbcEventType::WriteAck) => extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + IbcEvent::WriteAcknowledgement(WriteAcknowledgement { + height: Default::default(), + packet, + ack: write_ack, + }) + }) + .ok(), + Ok(IbcEventType::AckPacket) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. 
+ debug_assert_eq!(write_ack.len(), 0); + IbcEvent::AcknowledgePacket(AcknowledgePacket { + height: Default::default(), + packet, + }) + }) + .ok() + }, + Ok(IbcEventType::Timeout) => { + extract_packet_and_write_ack_from_tx(event) + .map(|(packet, write_ack)| { + // This event should not have a write ack. + debug_assert_eq!(write_ack.len(), 0); + IbcEvent::TimeoutPacket(TimeoutPacket { height: Default::default(), packet }) + }) + .ok() + }, + _ => None, + } } fn extract_attributes_from_tx(event: &tendermint::abci::Event) -> Result { - let mut attr = Attributes::default(); - - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); - match key { - PORT_ID_ATTRIBUTE_KEY => attr.port_id = value.parse().map_err(Error::identifier)?, - CHANNEL_ID_ATTRIBUTE_KEY => { - attr.channel_id = value.parse().ok(); - } - CONNECTION_ID_ATTRIBUTE_KEY => { - attr.connection_id = value.parse().map_err(Error::identifier)?; - } - COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY => { - attr.counterparty_port_id = value.parse().map_err(Error::identifier)?; - } - COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY => { - attr.counterparty_channel_id = value.parse().ok(); - } - _ => {} - } - } - - Ok(attr) + let mut attr = Attributes::default(); + + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + PORT_ID_ATTRIBUTE_KEY => attr.port_id = value.parse().map_err(Error::identifier)?, + CHANNEL_ID_ATTRIBUTE_KEY => { + attr.channel_id = value.parse().ok(); + }, + CONNECTION_ID_ATTRIBUTE_KEY => { + attr.connection_id = value.parse().map_err(Error::identifier)?; + }, + COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY => { + attr.counterparty_port_id = value.parse().map_err(Error::identifier)?; + }, + COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY => { + attr.counterparty_channel_id = value.parse().ok(); + }, + _ => {}, + } + } + + Ok(attr) } fn extract_packet_and_write_ack_from_tx( - event: &tendermint::abci::Event, + event: &tendermint::abci::Event, 
) -> Result<(Packet, Vec), Error> { - let mut packet = Packet::default(); - let mut write_ack: Vec = Vec::new(); - for tag in &event.attributes { - let key = tag.key.as_ref(); - let value = tag.value.as_ref(); - match key { - PKT_SRC_PORT_ATTRIBUTE_KEY => { - packet.source_port = value.parse().map_err(Error::identifier)?; - } - PKT_SRC_CHANNEL_ATTRIBUTE_KEY => { - packet.source_channel = value.parse().map_err(Error::identifier)?; - } - PKT_DST_PORT_ATTRIBUTE_KEY => { - packet.destination_port = value.parse().map_err(Error::identifier)?; - } - PKT_DST_CHANNEL_ATTRIBUTE_KEY => { - packet.destination_channel = value.parse().map_err(Error::identifier)?; - } - PKT_SEQ_ATTRIBUTE_KEY => { - packet.sequence = value - .parse::() - .map_err(|e| Error::invalid_string_as_sequence(value.to_string(), e))? - .into() - } - PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY => { - packet.timeout_height = - value.parse().map_err(|_| Error::invalid_timeout_height())?; - } - PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY => { - packet.timeout_timestamp = value.parse().unwrap(); - } - PKT_DATA_ATTRIBUTE_KEY => { - packet.data = Vec::from(value.as_bytes()); - } - PKT_ACK_ATTRIBUTE_KEY => { - write_ack = Vec::from(value.as_bytes()); - } - _ => {} - } - } - - Ok((packet, write_ack)) + let mut packet = Packet::default(); + let mut write_ack: Vec = Vec::new(); + for tag in &event.attributes { + let key = tag.key.as_str(); + let value = tag.value.as_str(); + match key { + PKT_SRC_PORT_ATTRIBUTE_KEY => { + packet.source_port = value.parse().map_err(Error::identifier)?; + }, + PKT_SRC_CHANNEL_ATTRIBUTE_KEY => { + packet.source_channel = value.parse().map_err(Error::identifier)?; + }, + PKT_DST_PORT_ATTRIBUTE_KEY => { + packet.destination_port = value.parse().map_err(Error::identifier)?; + }, + PKT_DST_CHANNEL_ATTRIBUTE_KEY => { + packet.destination_channel = value.parse().map_err(Error::identifier)?; + }, + PKT_SEQ_ATTRIBUTE_KEY => + packet.sequence = value + .parse::() + .map_err(|e| 
Error::invalid_string_as_sequence(value.to_string(), e))? + .into(), + PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY => { + packet.timeout_height = + value.parse().map_err(|_| Error::invalid_timeout_height())?; + }, + PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY => { + packet.timeout_timestamp = value.parse().unwrap(); + }, + PKT_DATA_ATTRIBUTE_KEY => { + packet.data = Vec::from(value.as_bytes()); + }, + PKT_ACK_ATTRIBUTE_KEY => { + write_ack = Vec::from(value.as_bytes()); + }, + _ => {}, + } + } + + Ok((packet, write_ack)) } fn extract_attributes(object: &RawObject<'_>, namespace: &str) -> Result { - Ok(Attributes { - height: object.height, - port_id: extract_attribute(object, &format!("{}.port_id", namespace))? - .parse() - .map_err(EventError::parse)?, - channel_id: maybe_extract_attribute(object, &format!("{}.channel_id", namespace)) - .and_then(|v| v.parse().ok()), - connection_id: extract_attribute(object, &format!("{}.connection_id", namespace))? - .parse() - .map_err(EventError::parse)?, - counterparty_port_id: extract_attribute( - object, - &format!("{}.counterparty_port_id", namespace), - )? - .parse() - .map_err(EventError::parse)?, - counterparty_channel_id: maybe_extract_attribute( - object, - &format!("{}.counterparty_channel_id", namespace), - ) - .and_then(|v| v.parse().ok()), - }) + Ok(Attributes { + height: object.height, + port_id: extract_attribute(object, &format!("{}.port_id", namespace))? + .parse() + .map_err(EventError::parse)?, + channel_id: maybe_extract_attribute(object, &format!("{}.channel_id", namespace)) + .and_then(|v| v.parse().ok()), + connection_id: extract_attribute(object, &format!("{}.connection_id", namespace))? + .parse() + .map_err(EventError::parse)?, + counterparty_port_id: extract_attribute( + object, + &format!("{}.counterparty_port_id", namespace), + )? 
+ .parse() + .map_err(EventError::parse)?, + counterparty_channel_id: maybe_extract_attribute( + object, + &format!("{}.counterparty_channel_id", namespace), + ) + .and_then(|v| v.parse().ok()), + }) } #[derive(Debug, Default, Deserialize, Serialize, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Attributes { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl Attributes { - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } } /// Convert attributes to Tendermint ABCI tags @@ -240,45 +235,51 @@ impl Attributes { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. 
-impl From for Vec { - fn from(a: Attributes) -> Self { - let mut attributes = vec![]; - let height = Tag { - key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: a.height.to_string().parse().unwrap(), - }; - attributes.push(height); - let port_id = Tag { - key: PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.port_id.to_string().parse().unwrap(), - }; - attributes.push(port_id); - if let Some(channel_id) = a.channel_id { - let channel_id = Tag { - key: CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: channel_id.to_string().parse().unwrap(), - }; - attributes.push(channel_id); - } - let connection_id = Tag { - key: CONNECTION_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.connection_id.to_string().parse().unwrap(), - }; - attributes.push(connection_id); - let counterparty_port_id = Tag { - key: COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: a.counterparty_port_id.to_string().parse().unwrap(), - }; - attributes.push(counterparty_port_id); - if let Some(channel_id) = a.counterparty_channel_id { - let channel_id = Tag { - key: COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), - value: channel_id.to_string().parse().unwrap(), - }; - attributes.push(channel_id); - } - attributes - } +impl From for Vec { + fn from(a: Attributes) -> Self { + let mut attributes = vec![]; + let height = EventAttribute { + key: HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: a.height.to_string().parse().unwrap(), + index: false, + }; + attributes.push(height); + let port_id = EventAttribute { + key: PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.port_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(port_id); + if let Some(channel_id) = a.channel_id { + let channel_id = EventAttribute { + key: CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: channel_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(channel_id); + } + let connection_id = EventAttribute { + key: CONNECTION_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: 
a.connection_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(connection_id); + let counterparty_port_id = EventAttribute { + key: COUNTERPARTY_PORT_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: a.counterparty_port_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(counterparty_port_id); + if let Some(channel_id) = a.counterparty_channel_id { + let channel_id = EventAttribute { + key: COUNTERPARTY_CHANNEL_ID_ATTRIBUTE_KEY.parse().unwrap(), + value: channel_id.to_string().parse().unwrap(), + index: false, + }; + attributes.push(channel_id); + } + attributes + } } /// Convert attributes to Tendermint ABCI tags @@ -289,409 +290,413 @@ impl From for Vec { /// is infallible, even if it is not represented in the error type. /// Once tendermint-rs improves the API of the `Key` and `Value` types, /// we will be able to remove the `.parse().unwrap()` calls. -impl TryFrom for Vec { - type Error = Error; - fn try_from(p: Packet) -> Result { - let mut attributes = vec![]; - let src_port = Tag { - key: PKT_SRC_PORT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.source_port.to_string().parse().unwrap(), - }; - attributes.push(src_port); - let src_channel = Tag { - key: PKT_SRC_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), - value: p.source_channel.to_string().parse().unwrap(), - }; - attributes.push(src_channel); - let dst_port = Tag { - key: PKT_DST_PORT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.destination_port.to_string().parse().unwrap(), - }; - attributes.push(dst_port); - let dst_channel = Tag { - key: PKT_DST_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), - value: p.destination_channel.to_string().parse().unwrap(), - }; - attributes.push(dst_channel); - let sequence = Tag { - key: PKT_SEQ_ATTRIBUTE_KEY.parse().unwrap(), - value: p.sequence.to_string().parse().unwrap(), - }; - attributes.push(sequence); - let timeout_height = Tag { - key: PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), - value: p.timeout_height.to_string().parse().unwrap(), - 
}; - attributes.push(timeout_height); - let timeout_timestamp = Tag { - key: PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY.parse().unwrap(), - value: p - .timeout_timestamp - .nanoseconds() - .to_string() - .parse() - .unwrap(), - }; - attributes.push(timeout_timestamp); - let val = - String::from_utf8(p.data).expect("hex-encoded string should always be valid UTF-8"); - let packet_data = Tag { - key: PKT_DATA_ATTRIBUTE_KEY.parse().unwrap(), - value: val.parse().unwrap(), - }; - attributes.push(packet_data); - let ack = Tag { - key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), - value: "".parse().unwrap(), - }; - attributes.push(ack); - Ok(attributes) - } +impl TryFrom for Vec { + type Error = Error; + fn try_from(p: Packet) -> Result { + let mut attributes = vec![]; + let src_port = EventAttribute { + key: PKT_SRC_PORT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.source_port.to_string().parse().unwrap(), + index: false, + }; + attributes.push(src_port); + let src_channel = EventAttribute { + key: PKT_SRC_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), + value: p.source_channel.to_string().parse().unwrap(), + index: false, + }; + attributes.push(src_channel); + let dst_port = EventAttribute { + key: PKT_DST_PORT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.destination_port.to_string().parse().unwrap(), + index: false, + }; + attributes.push(dst_port); + let dst_channel = EventAttribute { + key: PKT_DST_CHANNEL_ATTRIBUTE_KEY.parse().unwrap(), + value: p.destination_channel.to_string().parse().unwrap(), + index: false, + }; + attributes.push(dst_channel); + let sequence = EventAttribute { + key: PKT_SEQ_ATTRIBUTE_KEY.parse().unwrap(), + value: p.sequence.to_string().parse().unwrap(), + index: false, + }; + attributes.push(sequence); + let timeout_height = EventAttribute { + key: PKT_TIMEOUT_HEIGHT_ATTRIBUTE_KEY.parse().unwrap(), + value: p.timeout_height.to_string().parse().unwrap(), + index: false, + }; + attributes.push(timeout_height); + let timeout_timestamp = EventAttribute { + key: 
PKT_TIMEOUT_TIMESTAMP_ATTRIBUTE_KEY.parse().unwrap(), + value: p.timeout_timestamp.nanoseconds().to_string().parse().unwrap(), + index: false, + }; + attributes.push(timeout_timestamp); + let val = + String::from_utf8(p.data).expect("hex-encoded string should always be valid UTF-8"); + let packet_data = EventAttribute { + key: PKT_DATA_ATTRIBUTE_KEY.parse().unwrap(), + value: val.parse().unwrap(), + index: false, + }; + attributes.push(packet_data); + let ack = EventAttribute { + key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), + value: "".parse().unwrap(), + index: false, + }; + attributes.push(ack); + Ok(attributes) + } } trait EventType { - fn event_type() -> IbcEventType; + fn event_type() -> IbcEventType; } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenInit { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl OpenInit { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for Attributes { - fn from(ev: OpenInit) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: 
ev.counterparty_channel_id, - } - } + fn from(ev: OpenInit) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl From for IbcEvent { - fn from(v: OpenInit) -> Self { - IbcEvent::OpenInitChannel(v) - } + fn from(v: OpenInit) -> Self { + IbcEvent::OpenInitChannel(v) + } } impl EventType for OpenInit { - fn event_type() -> IbcEventType { - IbcEventType::OpenInitChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenInitChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenTry { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: OpenTry) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenTry) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenTry { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + 
self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: OpenTry) -> Self { - IbcEvent::OpenTryChannel(v) - } + fn from(v: OpenTry) -> Self { + IbcEvent::OpenTryChannel(v) + } } impl EventType for OpenTry { - fn event_type() -> IbcEventType { - IbcEventType::OpenTryChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenTryChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenAck { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub counterparty_channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub counterparty_channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, } impl From for Attributes { - fn from(ev: OpenAck) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenAck) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenAck { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - - pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { - self.counterparty_channel_id.as_ref() - } + pub fn channel_id(&self) -> Option<&ChannelId> { + 
self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + + pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { + self.counterparty_channel_id.as_ref() + } } impl From for IbcEvent { - fn from(v: OpenAck) -> Self { - IbcEvent::OpenAckChannel(v) - } + fn from(v: OpenAck) -> Self { + IbcEvent::OpenAckChannel(v) + } } impl EventType for OpenAck { - fn event_type() -> IbcEventType { - IbcEventType::OpenAckChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenAckChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct OpenConfirm { - pub height: Height, - pub port_id: PortId, - pub channel_id: Option, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: Option, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: OpenConfirm) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: OpenConfirm) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl OpenConfirm { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn port_id(&self) -> &PortId { - &self.port_id - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> 
Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn port_id(&self) -> &PortId { + &self.port_id + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: OpenConfirm) -> Self { - IbcEvent::OpenConfirmChannel(v) - } + fn from(v: OpenConfirm) -> Self { + IbcEvent::OpenConfirmChannel(v) + } } impl EventType for OpenConfirm { - fn event_type() -> IbcEventType { - IbcEventType::OpenConfirmChannel - } + fn event_type() -> IbcEventType { + IbcEventType::OpenConfirmChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct CloseInit { - pub height: Height, - pub port_id: PortId, - pub channel_id: ChannelId, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub port_id: PortId, + pub channel_id: ChannelId, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: CloseInit) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: Some(ev.channel_id), - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: CloseInit) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: Some(ev.channel_id), + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl CloseInit { - pub fn port_id(&self) -> &PortId { - &self.port_id - } + pub fn port_id(&self) -> &PortId { + &self.port_id + } - pub fn channel_id(&self) -> &ChannelId { - &self.channel_id - } + pub fn channel_id(&self) -> &ChannelId { + &self.channel_id + } - pub fn counterparty_port_id(&self) -> &PortId { - &self.counterparty_port_id - } + pub fn 
counterparty_port_id(&self) -> &PortId { + &self.counterparty_port_id + } - pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { - self.counterparty_channel_id.as_ref() - } + pub fn counterparty_channel_id(&self) -> Option<&ChannelId> { + self.counterparty_channel_id.as_ref() + } - pub fn height(&self) -> Height { - self.height - } + pub fn height(&self) -> Height { + self.height + } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl TryFrom for CloseInit { - type Error = EventError; - fn try_from(attrs: Attributes) -> Result { - if let Some(channel_id) = attrs.channel_id() { - Ok(CloseInit { - height: attrs.height, - port_id: attrs.port_id.clone(), - channel_id: *channel_id, - connection_id: attrs.connection_id.clone(), - counterparty_port_id: attrs.counterparty_port_id.clone(), - counterparty_channel_id: attrs.counterparty_channel_id, - }) - } else { - Err(EventError::channel(Error::missing_channel_id())) - } - } + type Error = EventError; + fn try_from(attrs: Attributes) -> Result { + if let Some(channel_id) = attrs.channel_id() { + Ok(CloseInit { + height: attrs.height, + port_id: attrs.port_id.clone(), + channel_id: *channel_id, + connection_id: attrs.connection_id.clone(), + counterparty_port_id: attrs.counterparty_port_id.clone(), + counterparty_channel_id: attrs.counterparty_channel_id, + }) + } else { + Err(EventError::channel(Error::missing_channel_id())) + } + } } impl From for IbcEvent { - fn from(v: CloseInit) -> Self { - IbcEvent::CloseInitChannel(v) - } + fn from(v: CloseInit) -> Self { + IbcEvent::CloseInitChannel(v) + } } impl core::fmt::Display for CloseInit { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "{} {} {:?}", - self.height(), - IbcEventType::CloseInitChannel.as_str(), - Attributes::from(self.clone()) - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> 
Result<(), core::fmt::Error> { + write!( + f, + "{} {} {:?}", + self.height(), + IbcEventType::CloseInitChannel.as_str(), + Attributes::from(self.clone()) + ) + } } impl EventType for CloseInit { - fn event_type() -> IbcEventType { - IbcEventType::CloseInitChannel - } + fn event_type() -> IbcEventType { + IbcEventType::CloseInitChannel + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct CloseConfirm { - pub height: Height, - pub channel_id: Option, - pub port_id: PortId, - pub connection_id: ConnectionId, - pub counterparty_port_id: PortId, - pub counterparty_channel_id: Option, + pub height: Height, + pub channel_id: Option, + pub port_id: PortId, + pub connection_id: ConnectionId, + pub counterparty_port_id: PortId, + pub counterparty_channel_id: Option, } impl From for Attributes { - fn from(ev: CloseConfirm) -> Self { - Self { - height: ev.height, - port_id: ev.port_id, - channel_id: ev.channel_id, - connection_id: ev.connection_id, - counterparty_port_id: ev.counterparty_port_id, - counterparty_channel_id: ev.counterparty_channel_id, - } - } + fn from(ev: CloseConfirm) -> Self { + Self { + height: ev.height, + port_id: ev.port_id, + channel_id: ev.channel_id, + connection_id: ev.connection_id, + counterparty_port_id: ev.counterparty_port_id, + counterparty_channel_id: ev.counterparty_channel_id, + } + } } impl CloseConfirm { - pub fn channel_id(&self) -> Option<&ChannelId> { - self.channel_id.as_ref() - } - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } + pub fn channel_id(&self) -> Option<&ChannelId> { + self.channel_id.as_ref() + } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } } impl From for IbcEvent { - fn from(v: CloseConfirm) -> Self { - IbcEvent::CloseConfirmChannel(v) - } + fn from(v: CloseConfirm) -> Self { + IbcEvent::CloseConfirmChannel(v) + } } impl EventType 
for CloseConfirm { - fn event_type() -> IbcEventType { - IbcEventType::CloseConfirmChannel - } + fn event_type() -> IbcEventType { + IbcEventType::CloseConfirmChannel + } } macro_rules! impl_try_from_attribute_for_event { @@ -719,10 +724,10 @@ macro_rules! impl_from_ibc_to_abci_event { ($($event:ty),+) => { $(impl From<$event> for AbciEvent { fn from(v: $event) -> Self { - let attributes = Vec::::from(Attributes::from(v)); - let type_str = <$event>::event_type().as_str().to_string(); + let attributes = Vec::::from(Attributes::from(v)); + let kind = <$event>::event_type().as_str().to_string(); AbciEvent { - type_str, + kind, attributes, } } @@ -730,14 +735,7 @@ macro_rules! impl_from_ibc_to_abci_event { }; } -impl_from_ibc_to_abci_event!( - OpenInit, - OpenTry, - OpenAck, - OpenConfirm, - CloseInit, - CloseConfirm -); +impl_from_ibc_to_abci_event!(OpenInit, OpenTry, OpenAck, OpenConfirm, CloseInit, CloseConfirm); macro_rules! impl_try_from_raw_obj_for_event { ($($event:ty),+) => { @@ -751,353 +749,320 @@ macro_rules! 
impl_try_from_raw_obj_for_event { }; } -impl_try_from_raw_obj_for_event!( - OpenInit, - OpenTry, - OpenAck, - OpenConfirm, - CloseInit, - CloseConfirm -); +impl_try_from_raw_obj_for_event!(OpenInit, OpenTry, OpenAck, OpenConfirm, CloseInit, CloseConfirm); #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct SendPacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl SendPacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: SendPacket) -> Self { - IbcEvent::SendPacket(v) - } + fn from(v: SendPacket) -> Self { + IbcEvent::SendPacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: SendPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - type_str: IbcEventType::SendPacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: SendPacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::SendPacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for SendPacket { - fn fmt(&self, f: &mut 
core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "SendPacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "SendPacket - h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for SendPacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "SendPacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "SendPacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct ReceivePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl ReceivePacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: ReceivePacket) -> Self { - IbcEvent::ReceivePacket(v) - } + fn from(v: ReceivePacket) -> Self { + IbcEvent::ReceivePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: ReceivePacket) -> Result { - let attributes 
= Vec::::try_from(v.packet)?; - Ok(AbciEvent { - type_str: IbcEventType::ReceivePacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: ReceivePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::ReceivePacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for ReceivePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "ReceivePacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "ReceivePacket - h:{}, {}", self.height, self.packet) + } } #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct WriteAcknowledgement { - pub height: Height, - pub packet: Packet, - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - pub ack: Vec, + pub height: Height, + pub packet: Packet, + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = "crate::serializers::deser_hex_upper" + )] + pub ack: Vec, } impl WriteAcknowledgement { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } 
impl From for IbcEvent { - fn from(v: WriteAcknowledgement) -> Self { - IbcEvent::WriteAcknowledgement(v) - } + fn from(v: WriteAcknowledgement) -> Self { + IbcEvent::WriteAcknowledgement(v) + } } impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(v: WriteAcknowledgement) -> Result { - let mut attributes = Vec::::try_from(v.packet)?; - let val = - String::from_utf8(v.ack).expect("hex-encoded string should always be valid UTF-8"); - // No actual conversion from string to `Tag::Key` or `Tag::Value` - let ack = Tag { - key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), - value: val.parse().unwrap(), - }; - attributes.push(ack); - Ok(AbciEvent { - type_str: IbcEventType::WriteAck.as_str().to_string(), - attributes, - }) - } + type Error = Error; + + fn try_from(v: WriteAcknowledgement) -> Result { + let mut attributes = Vec::::try_from(v.packet)?; + let val = + String::from_utf8(v.ack).expect("hex-encoded string should always be valid UTF-8"); + // No actual conversion from string to `EventAttribute::Key` or `EventAttribute::Value` + let ack = EventAttribute { + key: PKT_ACK_ATTRIBUTE_KEY.parse().unwrap(), + value: val.parse().unwrap(), + index: false, + }; + attributes.push(ack); + Ok(AbciEvent { kind: IbcEventType::WriteAck.as_str().to_string(), attributes }) + } } impl core::fmt::Display for WriteAcknowledgement { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "WriteAcknowledgement - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "WriteAcknowledgement - h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for WriteAcknowledgement { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "WriteAcknowledgement - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "WriteAcknowledgement - 
h:{}, {}", self.height, self.packet) + } } #[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct AcknowledgePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl AcknowledgePacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } } impl From for IbcEvent { - fn from(v: AcknowledgePacket) -> Self { - IbcEvent::AcknowledgePacket(v) - } + fn from(v: AcknowledgePacket) -> Self { + IbcEvent::AcknowledgePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: AcknowledgePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - type_str: IbcEventType::AckPacket.as_str().to_string(), - attributes, - }) - } + fn try_from(v: AcknowledgePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::AckPacket.as_str().to_string(), attributes }) + } } impl core::fmt::Display for AcknowledgePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "h:{}, {}", self.height, self.packet) + } } impl core::fmt::Debug for AcknowledgePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "AcknowledgePacket - h:{}, {}", self.height, self.packet) - } + fn fmt(&self, f: &mut 
core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "AcknowledgePacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct TimeoutPacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl TimeoutPacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: TimeoutPacket) -> Self { - IbcEvent::TimeoutPacket(v) - } + fn from(v: TimeoutPacket) -> Self { + IbcEvent::TimeoutPacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: TimeoutPacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - type_str: IbcEventType::Timeout.as_str().to_string(), - attributes, - }) - } + fn try_from(v: TimeoutPacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::Timeout.as_str().to_string(), attributes }) + } } impl core::fmt::Display for TimeoutPacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "TimeoutPacket - h:{}, {}", 
self.height, self.packet) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "TimeoutPacket - h:{}, {}", self.height, self.packet) + } } #[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)] pub struct TimeoutOnClosePacket { - pub height: Height, - pub packet: Packet, + pub height: Height, + pub packet: Packet, } impl TimeoutOnClosePacket { - pub fn height(&self) -> Height { - self.height - } - pub fn set_height(&mut self, height: Height) { - self.height = height; - } - pub fn src_port_id(&self) -> &PortId { - &self.packet.source_port - } - pub fn src_channel_id(&self) -> &ChannelId { - &self.packet.source_channel - } - pub fn dst_port_id(&self) -> &PortId { - &self.packet.destination_port - } - pub fn dst_channel_id(&self) -> &ChannelId { - &self.packet.destination_channel - } + pub fn height(&self) -> Height { + self.height + } + pub fn set_height(&mut self, height: Height) { + self.height = height; + } + pub fn src_port_id(&self) -> &PortId { + &self.packet.source_port + } + pub fn src_channel_id(&self) -> &ChannelId { + &self.packet.source_channel + } + pub fn dst_port_id(&self) -> &PortId { + &self.packet.destination_port + } + pub fn dst_channel_id(&self) -> &ChannelId { + &self.packet.destination_channel + } } impl From for IbcEvent { - fn from(v: TimeoutOnClosePacket) -> Self { - IbcEvent::TimeoutOnClosePacket(v) - } + fn from(v: TimeoutOnClosePacket) -> Self { + IbcEvent::TimeoutOnClosePacket(v) + } } impl TryFrom for AbciEvent { - type Error = Error; + type Error = Error; - fn try_from(v: TimeoutOnClosePacket) -> Result { - let attributes = Vec::::try_from(v.packet)?; - Ok(AbciEvent { - type_str: IbcEventType::TimeoutOnClose.as_str().to_string(), - attributes, - }) - } + fn try_from(v: TimeoutOnClosePacket) -> Result { + let attributes = Vec::::try_from(v.packet)?; + Ok(AbciEvent { kind: IbcEventType::TimeoutOnClose.as_str().to_string(), attributes }) + } } impl core::fmt::Display for 
TimeoutOnClosePacket { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "TimeoutOnClosePacket - h:{}, {}", - self.height, self.packet - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "TimeoutOnClosePacket - h:{}, {}", self.height, self.packet) + } } macro_rules! impl_try_from_raw_obj_for_packet { @@ -1119,141 +1084,127 @@ macro_rules! impl_try_from_raw_obj_for_packet { } impl_try_from_raw_obj_for_packet!( - SendPacket, - ReceivePacket, - AcknowledgePacket, - TimeoutPacket, - TimeoutOnClosePacket + SendPacket, + ReceivePacket, + AcknowledgePacket, + TimeoutPacket, + TimeoutOnClosePacket ); impl TryFrom> for WriteAcknowledgement { - type Error = EventError; + type Error = EventError; - fn try_from(obj: RawObject<'_>) -> Result { - let height = obj.height; - let data_str: String = - extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_DATA_ATTRIBUTE_KEY))?; - let ack = extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_ACK_ATTRIBUTE_KEY))? - .into_bytes(); + fn try_from(obj: RawObject<'_>) -> Result { + let height = obj.height; + let data_str: String = + extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_DATA_ATTRIBUTE_KEY))?; + let ack = extract_attribute(&obj, &format!("{}.{}", obj.action, PKT_ACK_ATTRIBUTE_KEY))? 
+ .into_bytes(); - let mut packet = Packet::try_from(obj)?; - packet.data = Vec::from(data_str.as_str().as_bytes()); + let mut packet = Packet::try_from(obj)?; + packet.data = Vec::from(data_str.as_str().as_bytes()); - Ok(Self { - height, - packet, - ack, - }) - } + Ok(Self { height, packet, ack }) + } } #[cfg(test)] mod tests { - use super::*; - use crate::core::ics04_channel::packet::Sequence; - use crate::timestamp::Timestamp; - - #[test] - fn channel_event_to_abci_event() { - let attributes = Attributes { - height: Height::default(), - port_id: "test_port".parse().unwrap(), - channel_id: Some("channel-0".parse().unwrap()), - connection_id: "test_connection".parse().unwrap(), - counterparty_port_id: "counterparty_test_port".parse().unwrap(), - counterparty_channel_id: Some("channel-1".parse().unwrap()), - }; - let mut abci_events = vec![]; - let open_init = OpenInit::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_init.clone())); - let open_try = OpenTry::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_try.clone())); - let open_ack = OpenAck::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_ack.clone())); - let open_confirm = OpenConfirm::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(open_confirm.clone())); - let close_init = CloseInit::try_from(attributes.clone()).unwrap(); - abci_events.push(AbciEvent::from(close_init.clone())); - let close_confirm = CloseConfirm::try_from(attributes).unwrap(); - abci_events.push(AbciEvent::from(close_confirm.clone())); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::OpenInitChannel(e) => { - assert_eq!(Attributes::from(e), open_init.clone().into()) - } - IbcEvent::OpenTryChannel(e) => { - assert_eq!(Attributes::from(e), open_try.clone().into()) - } - IbcEvent::OpenAckChannel(e) => { - assert_eq!(Attributes::from(e), open_ack.clone().into()) - } - 
IbcEvent::OpenConfirmChannel(e) => { - assert_eq!(Attributes::from(e), open_confirm.clone().into()) - } - IbcEvent::CloseInitChannel(e) => { - assert_eq!(Attributes::from(e), close_init.clone().into()) - } - IbcEvent::CloseConfirmChannel(e) => { - assert_eq!(Attributes::from(e), close_confirm.clone().into()) - } - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } - - #[test] - fn packet_event_to_abci_event() { - let packet = Packet { - sequence: Sequence::from(10), - source_port: "a_test_port".parse().unwrap(), - source_channel: "channel-0".parse().unwrap(), - destination_port: "b_test_port".parse().unwrap(), - destination_channel: "channel-1".parse().unwrap(), - data: "test_data".as_bytes().to_vec(), - timeout_height: Height::new(1, 10), - timeout_timestamp: Timestamp::now(), - }; - let mut abci_events = vec![]; - let send_packet = SendPacket { - height: Height::default(), - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(send_packet.clone()).unwrap()); - let write_ack = WriteAcknowledgement { - height: Height::default(), - packet: packet.clone(), - ack: "test_ack".as_bytes().to_vec(), - }; - abci_events.push(AbciEvent::try_from(write_ack.clone()).unwrap()); - let ack_packet = AcknowledgePacket { - height: Height::default(), - packet: packet.clone(), - }; - abci_events.push(AbciEvent::try_from(ack_packet.clone()).unwrap()); - let timeout_packet = TimeoutPacket { - height: Height::default(), - packet, - }; - abci_events.push(AbciEvent::try_from(timeout_packet.clone()).unwrap()); - - for event in abci_events { - match try_from_tx(&event) { - Some(e) => match e { - IbcEvent::SendPacket(e) => assert_eq!(e.packet, send_packet.packet), - IbcEvent::WriteAcknowledgement(e) => { - assert_eq!(e.packet, write_ack.packet); - assert_eq!(e.ack, write_ack.ack); - } - IbcEvent::AcknowledgePacket(e) => assert_eq!(e.packet, ack_packet.packet), - IbcEvent::TimeoutPacket(e) => assert_eq!(e.packet, 
timeout_packet.packet), - _ => panic!("unexpected event type"), - }, - None => panic!("converted event was wrong"), - } - } - } + use super::*; + use crate::{core::ics04_channel::packet::Sequence, timestamp::Timestamp}; + + #[test] + fn channel_event_to_abci_event() { + let attributes = Attributes { + height: Height::default(), + port_id: "test_port".parse().unwrap(), + channel_id: Some("channel-0".parse().unwrap()), + connection_id: "test_connection".parse().unwrap(), + counterparty_port_id: "counterparty_test_port".parse().unwrap(), + counterparty_channel_id: Some("channel-1".parse().unwrap()), + }; + let mut abci_events = vec![]; + let open_init = OpenInit::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_init.clone())); + let open_try = OpenTry::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_try.clone())); + let open_ack = OpenAck::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_ack.clone())); + let open_confirm = OpenConfirm::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(open_confirm.clone())); + let close_init = CloseInit::try_from(attributes.clone()).unwrap(); + abci_events.push(AbciEvent::from(close_init.clone())); + let close_confirm = CloseConfirm::try_from(attributes).unwrap(); + abci_events.push(AbciEvent::from(close_confirm.clone())); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::OpenInitChannel(e) => { + assert_eq!(Attributes::from(e), open_init.clone().into()) + }, + IbcEvent::OpenTryChannel(e) => { + assert_eq!(Attributes::from(e), open_try.clone().into()) + }, + IbcEvent::OpenAckChannel(e) => { + assert_eq!(Attributes::from(e), open_ack.clone().into()) + }, + IbcEvent::OpenConfirmChannel(e) => { + assert_eq!(Attributes::from(e), open_confirm.clone().into()) + }, + IbcEvent::CloseInitChannel(e) => { + assert_eq!(Attributes::from(e), close_init.clone().into()) + }, + 
IbcEvent::CloseConfirmChannel(e) => { + assert_eq!(Attributes::from(e), close_confirm.clone().into()) + }, + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } + + #[test] + fn packet_event_to_abci_event() { + let packet = Packet { + sequence: Sequence::from(10), + source_port: "a_test_port".parse().unwrap(), + source_channel: "channel-0".parse().unwrap(), + destination_port: "b_test_port".parse().unwrap(), + destination_channel: "channel-1".parse().unwrap(), + data: "test_data".as_bytes().to_vec(), + timeout_height: Height::new(1, 10), + timeout_timestamp: Timestamp::now(), + }; + let mut abci_events = vec![]; + let send_packet = SendPacket { height: Height::default(), packet: packet.clone() }; + abci_events.push(AbciEvent::try_from(send_packet.clone()).unwrap()); + let write_ack = WriteAcknowledgement { + height: Height::default(), + packet: packet.clone(), + ack: "test_ack".as_bytes().to_vec(), + }; + abci_events.push(AbciEvent::try_from(write_ack.clone()).unwrap()); + let ack_packet = AcknowledgePacket { height: Height::default(), packet: packet.clone() }; + abci_events.push(AbciEvent::try_from(ack_packet.clone()).unwrap()); + let timeout_packet = TimeoutPacket { height: Height::default(), packet }; + abci_events.push(AbciEvent::try_from(timeout_packet.clone()).unwrap()); + + for event in abci_events { + match try_from_tx(&event) { + Some(e) => match e { + IbcEvent::SendPacket(e) => assert_eq!(e.packet, send_packet.packet), + IbcEvent::WriteAcknowledgement(e) => { + assert_eq!(e.packet, write_ack.packet); + assert_eq!(e.ack, write_ack.ack); + }, + IbcEvent::AcknowledgePacket(e) => assert_eq!(e.packet, ack_packet.packet), + IbcEvent::TimeoutPacket(e) => assert_eq!(e.packet, timeout_packet.packet), + _ => panic!("unexpected event type"), + }, + None => panic!("converted event was wrong"), + } + } + } } diff --git a/modules/src/core/ics04_channel/handler.rs b/modules/src/core/ics04_channel/handler.rs index 
0f428ea3cd..03834eeffc 100644 --- a/modules/src/core/ics04_channel/handler.rs +++ b/modules/src/core/ics04_channel/handler.rs @@ -1,15 +1,21 @@ //! This module implements the processing logic for ICS4 (channel) messages. -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::ChannelMsg; -use crate::core::ics04_channel::{msgs::PacketMsg, packet::PacketResult}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::core::ics26_routing::context::{ - Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, +use crate::{ + core::{ + ics04_channel::{ + channel::ChannelEnd, + error::Error, + msgs::{ChannelMsg, PacketMsg}, + packet::PacketResult, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::{ + Ics26Context, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, ReaderContext, Router, + }, + }, + handler::{HandlerOutput, HandlerOutputBuilder}, }; -use crate::handler::{HandlerOutput, HandlerOutputBuilder}; +use core::fmt::Debug; pub mod acknowledgement; pub mod chan_close_confirm; @@ -28,202 +34,180 @@ pub mod write_acknowledgement; /// Defines the possible states of a channel identifier in a `ChannelResult`. #[derive(Clone, Debug)] pub enum ChannelIdState { - /// Specifies that the channel handshake handler allocated a new channel identifier. This - /// happens during the processing of either the `MsgChannelOpenInit` or `MsgChannelOpenTry`. - Generated, + /// Specifies that the channel handshake handler allocated a new channel identifier. This + /// happens during the processing of either the `MsgChannelOpenInit` or `MsgChannelOpenTry`. + Generated, - /// Specifies that the handler reused a previously-allocated channel identifier. - Reused, + /// Specifies that the handler reused a previously-allocated channel identifier. 
+ Reused, } #[derive(Clone, Debug)] pub struct ChannelResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub channel_id_state: ChannelIdState, - pub channel_end: ChannelEnd, + pub port_id: PortId, + pub channel_id: ChannelId, + pub channel_id_state: ChannelIdState, + pub channel_end: ChannelEnd, } pub fn channel_validate(ctx: &Ctx, msg: &ChannelMsg) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let module_id = msg.lookup_module(ctx)?; - if ctx.router().has_route(&module_id) { - Ok(module_id) - } else { - Err(Error::route_not_found()) - } + let module_id = msg.lookup_module(ctx)?; + if ctx.router().has_route(&module_id) { + Ok(module_id) + } else { + Err(Error::route_not_found()) + } } /// General entry point for processing any type of message related to the ICS4 channel open and /// channel close handshake protocols. pub fn channel_dispatch( - ctx: &Ctx, - msg: &ChannelMsg, + ctx: &Ctx, + msg: &ChannelMsg, ) -> Result<(HandlerOutputBuilder<()>, ChannelResult), Error> where - Ctx: ChannelReader, + Ctx: ReaderContext, { - let output = match msg { - ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), - ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process(ctx, msg), - ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process(ctx, msg), - ChannelMsg::ChannelOpenConfirm(msg) => chan_open_confirm::process(ctx, msg), - ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), - ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process(ctx, msg), - }?; - let HandlerOutput { - result, - log, - events, - } = output; - let builder = HandlerOutput::builder().with_log(log).with_events(events); - Ok((builder, result)) + let output = match msg { + ChannelMsg::ChannelOpenInit(msg) => chan_open_init::process(ctx, msg), + ChannelMsg::ChannelOpenTry(msg) => chan_open_try::process::<_>(ctx, msg), + ChannelMsg::ChannelOpenAck(msg) => chan_open_ack::process::<_>(ctx, msg), + ChannelMsg::ChannelOpenConfirm(msg) 
=> chan_open_confirm::process::<_>(ctx, msg), + ChannelMsg::ChannelCloseInit(msg) => chan_close_init::process(ctx, msg), + ChannelMsg::ChannelCloseConfirm(msg) => chan_close_confirm::process::<_>(ctx, msg), + }?; + let HandlerOutput { result, log, events } = output; + let builder = HandlerOutput::builder().with_log(log).with_events(events); + Ok((builder, result)) } pub fn channel_callback( - ctx: &mut Ctx, - module_id: &ModuleId, - msg: &ChannelMsg, - mut result: ChannelResult, - module_output: &mut ModuleOutputBuilder, + ctx: &mut Ctx, + module_id: &ModuleId, + msg: &ChannelMsg, + mut result: ChannelResult, + module_output: &mut ModuleOutputBuilder, ) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let cb = ctx - .router_mut() - .get_route_mut(module_id) - .ok_or_else(Error::route_not_found)?; + let cb = ctx.router_mut().get_route_mut(module_id).ok_or_else(Error::route_not_found)?; - match msg { - ChannelMsg::ChannelOpenInit(msg) => cb.on_chan_open_init( - module_output, - msg.channel.ordering, - &msg.channel.connection_hops, - &msg.port_id, - &result.channel_id, - msg.channel.counterparty(), - &msg.channel.version, - )?, - ChannelMsg::ChannelOpenTry(msg) => { - let version = cb.on_chan_open_try( - module_output, - msg.channel.ordering, - &msg.channel.connection_hops, - &msg.port_id, - &result.channel_id, - msg.channel.counterparty(), - msg.channel.version(), - &msg.counterparty_version, - )?; - result.channel_end.version = version; - } - ChannelMsg::ChannelOpenAck(msg) => cb.on_chan_open_ack( - module_output, - &msg.port_id, - &result.channel_id, - &msg.counterparty_version, - )?, - ChannelMsg::ChannelOpenConfirm(msg) => { - cb.on_chan_open_confirm(module_output, &msg.port_id, &result.channel_id)? - } - ChannelMsg::ChannelCloseInit(msg) => { - cb.on_chan_close_init(module_output, &msg.port_id, &result.channel_id)? - } - ChannelMsg::ChannelCloseConfirm(msg) => { - cb.on_chan_close_confirm(module_output, &msg.port_id, &result.channel_id)? 
- } - } - Ok(result) + match msg { + ChannelMsg::ChannelOpenInit(msg) => cb.on_chan_open_init( + module_output, + msg.channel.ordering, + &msg.channel.connection_hops, + &msg.port_id, + &result.channel_id, + msg.channel.counterparty(), + &msg.channel.version, + )?, + ChannelMsg::ChannelOpenTry(msg) => { + let version = cb.on_chan_open_try( + module_output, + msg.channel.ordering, + &msg.channel.connection_hops, + &msg.port_id, + &result.channel_id, + msg.channel.counterparty(), + msg.channel.version(), + &msg.counterparty_version, + )?; + result.channel_end.version = version; + }, + ChannelMsg::ChannelOpenAck(msg) => cb.on_chan_open_ack( + module_output, + &msg.port_id, + &result.channel_id, + &msg.counterparty_version, + )?, + ChannelMsg::ChannelOpenConfirm(msg) => + cb.on_chan_open_confirm(module_output, &msg.port_id, &result.channel_id)?, + ChannelMsg::ChannelCloseInit(msg) => + cb.on_chan_close_init(module_output, &msg.port_id, &result.channel_id)?, + ChannelMsg::ChannelCloseConfirm(msg) => + cb.on_chan_close_confirm(module_output, &msg.port_id, &result.channel_id)?, + } + Ok(result) } pub fn get_module_for_packet_msg(ctx: &Ctx, msg: &PacketMsg) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let module_id = match msg { - PacketMsg::RecvPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.destination_port) - .map_err(Error::ics05_port)?, - PacketMsg::AckPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - PacketMsg::ToPacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - PacketMsg::ToClosePacket(msg) => ctx - .lookup_module_by_port(&msg.packet.source_port) - .map_err(Error::ics05_port)?, - }; + let module_id = match msg { + PacketMsg::RecvPacket(msg) => ctx + .lookup_module_by_port(&msg.packet.destination_port) + .map_err(Error::ics05_port)?, + PacketMsg::AckPacket(msg) => + 
ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + PacketMsg::ToPacket(msg) => + ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + PacketMsg::ToClosePacket(msg) => + ctx.lookup_module_by_port(&msg.packet.source_port).map_err(Error::ics05_port)?, + }; - if ctx.router().has_route(&module_id) { - Ok(module_id) - } else { - Err(Error::route_not_found()) - } + if ctx.router().has_route(&module_id) { + Ok(module_id) + } else { + Err(Error::route_not_found()) + } } /// Dispatcher for processing any type of message related to the ICS4 packet protocols. pub fn packet_dispatch( - ctx: &Ctx, - msg: &PacketMsg, + ctx: &Ctx, + msg: &PacketMsg, ) -> Result<(HandlerOutputBuilder<()>, PacketResult), Error> where - Ctx: ChannelReader, + Ctx: ReaderContext, { - let output = match msg { - PacketMsg::RecvPacket(msg) => recv_packet::process(ctx, msg), - PacketMsg::AckPacket(msg) => acknowledgement::process(ctx, msg), - PacketMsg::ToPacket(msg) => timeout::process(ctx, msg), - PacketMsg::ToClosePacket(msg) => timeout_on_close::process(ctx, msg), - }?; - let HandlerOutput { - result, - log, - events, - } = output; - let builder = HandlerOutput::builder().with_log(log).with_events(events); - Ok((builder, result)) + let output = match msg { + PacketMsg::RecvPacket(msg) => recv_packet::process::<_>(ctx, msg), + PacketMsg::AckPacket(msg) => acknowledgement::process::<_>(ctx, msg), + PacketMsg::ToPacket(msg) => timeout::process::<_>(ctx, msg), + PacketMsg::ToClosePacket(msg) => timeout_on_close::process::<_>(ctx, msg), + }?; + let HandlerOutput { result, log, events } = output; + let builder = HandlerOutput::builder().with_log(log).with_events(events); + Ok((builder, result)) } pub fn packet_callback( - ctx: &mut Ctx, - module_id: &ModuleId, - msg: &PacketMsg, - module_output: &mut ModuleOutputBuilder, + ctx: &mut Ctx, + module_id: &ModuleId, + msg: &PacketMsg, + module_output: &mut ModuleOutputBuilder, ) -> Result<(), Error> 
where - Ctx: Ics26Context, + Ctx: Ics26Context, { - let cb = ctx - .router_mut() - .get_route_mut(module_id) - .ok_or_else(Error::route_not_found)?; + let cb = ctx.router_mut().get_route_mut(module_id).ok_or_else(Error::route_not_found)?; - match msg { - PacketMsg::RecvPacket(msg) => { - let result = cb.on_recv_packet(module_output, &msg.packet, &msg.signer); - match result { - OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { - write_fn(cb.as_any_mut()).map_err(Error::app_module)?; - } - OnRecvPacketAck::Failed(_) => {} - } - } - PacketMsg::AckPacket(msg) => cb.on_acknowledgement_packet( - module_output, - &msg.packet, - &msg.acknowledgement, - &msg.signer, - )?, - PacketMsg::ToPacket(msg) => { - cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)? - } - PacketMsg::ToClosePacket(msg) => { - cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)? - } - }; - Ok(()) + match msg { + PacketMsg::RecvPacket(msg) => { + let result = cb.on_recv_packet(module_output, &msg.packet, &msg.signer); + match result { + OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { + write_fn(cb.as_any_mut()).map_err(Error::app_module)?; + }, + OnRecvPacketAck::Failed(_) => {}, + } + }, + PacketMsg::AckPacket(msg) => cb.on_acknowledgement_packet( + module_output, + &msg.packet, + &msg.acknowledgement, + &msg.signer, + )?, + PacketMsg::ToPacket(msg) => + cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)?, + PacketMsg::ToClosePacket(msg) => + cb.on_timeout_packet(module_output, &msg.packet, &msg.signer)?, + }; + Ok(()) } diff --git a/modules/src/core/ics04_channel/handler/acknowledgement.rs b/modules/src/core/ics04_channel/handler/acknowledgement.rs index 5246953b8d..d10c49937f 100644 --- a/modules/src/core/ics04_channel/handler/acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/acknowledgement.rs @@ -1,232 +1,239 @@ -use crate::core::ics03_connection::connection::State as ConnectionState; -use 
crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::events::AcknowledgePacket; -use crate::core::ics04_channel::handler::verify::verify_packet_acknowledgement_proofs; -use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{Counterparty, Order, State}, + error::Error, + events::AcknowledgePacket, + handler::verify::verify_packet_acknowledgement_proofs, + msgs::acknowledgement::MsgAcknowledgement, + packet::{PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; +use core::fmt::Debug; #[derive(Clone, Debug)] pub struct AckPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Option, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub seq_number: Option, } -pub fn process( - ctx: &dyn ChannelReader, - msg: &MsgAcknowledgement, +pub fn process( + ctx: &Ctx, + msg: &MsgAcknowledgement, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); + let mut output = HandlerOutput::builder(); - let packet = &msg.packet; + let packet = &msg.packet; - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - if !source_channel_end.state_matches(&State::Open) { - return 
Err(Error::channel_closed(packet.source_channel)); - } + if !source_channel_end.state_matches(&State::Open) { + return Err(Error::channel_closed(packet.source_channel)) + } - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; - if !connection_end.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - source_channel_end.connection_hops()[0].clone(), - )); - } + if !connection_end.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(source_channel_end.connection_hops()[0].clone())) + } - // Verify packet commitment - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; + // Verify packet commitment + let packet_commitment = ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; - if packet_commitment - != ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ) - { - return Err(Error::incorrect_packet_commitment(packet.sequence)); - } + if packet_commitment != + ctx.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ) { + return 
Err(Error::incorrect_packet_commitment(packet.sequence)) + } - // Verify the acknowledgement proof - verify_packet_acknowledgement_proofs( - ctx, - msg.proofs.height(), - packet, - msg.acknowledgement.clone(), - &connection_end, - &msg.proofs, - )?; + // Verify the acknowledgement proof + verify_packet_acknowledgement_proofs::( + ctx, + msg.proofs.height(), + packet, + msg.acknowledgement.clone(), + &connection_end, + &msg.proofs, + )?; - let result = if source_channel_end.order_matches(&Order::Ordered) { - let next_seq_ack = - ctx.get_next_sequence_ack(&(packet.source_port.clone(), packet.source_channel))?; + let result = if source_channel_end.order_matches(&Order::Ordered) { + let next_seq_ack = + ctx.get_next_sequence_ack(&(packet.source_port.clone(), packet.source_channel))?; - if packet.sequence != next_seq_ack { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_ack, - )); - } + if packet.sequence != next_seq_ack { + return Err(Error::invalid_packet_sequence(packet.sequence, next_seq_ack)) + } - PacketResult::Ack(AckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: Some(next_seq_ack.increment()), - }) - } else { - PacketResult::Ack(AckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: None, - }) - }; + PacketResult::Ack(AckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: Some(next_seq_ack.increment()), + }) + } else { + PacketResult::Ack(AckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: None, + }) + }; - output.log("success: packet ack"); + output.log("success: packet ack"); - output.emit(IbcEvent::AcknowledgePacket(AcknowledgePacket { - height: ctx.host_height(), - packet: packet.clone(), - })); + 
output.emit(IbcEvent::AcknowledgePacket(AcknowledgePacket { + height: ctx.host_height(), + packet: packet.clone(), + })); - Ok(output.with_result(result)) + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; + use test_log::test; - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::acknowledgement::process; - use crate::core::ics04_channel::msgs::acknowledgement::test_util::get_dummy_raw_msg_acknowledgement; - use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::timestamp::ZERO_DURATION; + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::acknowledgement::process, + msgs::acknowledgement::{ + test_util::get_dummy_raw_msg_acknowledgement, MsgAcknowledgement, + }, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; - #[test] - fn ack_packet_processing() { - struct Test { - name: String, - ctx: 
MockContext, - msg: MsgAcknowledgement, - want_pass: bool, - } + #[test] + fn ack_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgAcknowledgement, + want_pass: bool, + } - let context = MockContext::default(); + let context = MockContext::default(); - let client_height = Height::new(0, Height::default().revision_height + 2); + let client_height = Height::new(0, Height::default().revision_height + 2); - let msg = MsgAcknowledgement::try_from(get_dummy_raw_msg_acknowledgement( - client_height.revision_height, - )) - .unwrap(); - let packet = msg.packet.clone(); + let msg = MsgAcknowledgement::try_from(get_dummy_raw_msg_acknowledgement( + client_height.revision_height, + )) + .unwrap(); + let packet = msg.packet.clone(); - let data = context.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); + let data = context.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ); - let source_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); + let source_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); - let tests: Vec = 
vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel( - packet.source_port.clone(), - packet.source_channel, - source_channel_end, - ) - .with_packet_commitment( - packet.source_port, - packet.source_channel, - packet.sequence, - data, - ) //with_ack_sequence required for ordered channels - .with_ack_sequence( - packet.destination_port, - packet.destination_channel, - 1.into(), - ), - msg, - want_pass: true, - }, - ] - .into_iter() - .collect(); + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel( + packet.source_port.clone(), + packet.source_channel, + source_channel_end, + ) + .with_packet_commitment( + packet.source_port, + packet.source_channel, + packet.sequence, + data, + ) //with_ack_sequence required for ordered channels + .with_ack_sequence( + packet.destination_port, + packet.destination_channel, + 1.into(), + ), + msg, + want_pass: true, + }, + ] + .into_iter() + .collect(); - for test in tests { - let res = process(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. 
+ match res { + Ok(proto_output) => { + assert!( test.want_pass, "ack_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -234,24 +241,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. + assert!(!proto_output.events.is_empty()); // Some events must exist. - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::AcknowledgePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "ack_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::AcknowledgePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "ack_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs index fb47d41ac0..48e20c7d43 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_confirm.rs @@ -1,179 +1,195 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseConfirm`. 
-use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelCloseConfirm, + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_close_confirm::MsgChannelCloseConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelCloseConfirm, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Retrieve the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be closed. - if channel_end.state_matches(&State::Closed) { - return Err(Error::channel_closed(msg.channel_id)); - } - - // An OPEN IBC connection running on the local (host) chain should exist. 
- if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Closed, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - channel_end.version().clone(), - ); - - verify_channel_proofs( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs, - )?; - - output.log("success: channel close confirm "); - - // Transition the channel end to the new state & pick a version. - channel_end.set_state(State::Closed); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::CloseConfirmChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Retrieve the old channel end and validate it against the message. 
+ let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be closed. + if channel_end.state_matches(&State::Closed) { + return Err(Error::channel_closed(msg.channel_id)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Closed, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + channel_end.version().clone(), + ); + + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel close confirm "); + + // Transition the channel end to the new state & pick a version. 
+ channel_end.set_state(State::Closed); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::CloseConfirmChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::msgs::chan_close_confirm::test_util::get_dummy_raw_msg_chan_close_confirm; - use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::events::IbcEvent; - use crate::prelude::*; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, Order, State as ChannelState, - }; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn chan_close_confirm_event_height() { - let client_id = ClientId::new(ClientType::Mock, 
24).unwrap(); - let conn_id = ConnectionId::new(2); - let default_context = MockContext::default(); - let client_consensus_state_height = default_context.host_height(); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_close_confirm = MsgChannelCloseConfirm::try_from( - get_dummy_raw_msg_chan_close_confirm(client_consensus_state_height.revision_height), - ) - .unwrap(); - - let chan_end = ChannelEnd::new( - ChannelState::Open, - Order::default(), - Counterparty::new( - msg_chan_close_confirm.port_id.clone(), - Some(msg_chan_close_confirm.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let context = default_context - .with_client(&client_id, client_consensus_state_height) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_close_confirm.port_id.clone(), - msg_chan_close_confirm.channel_id, - chan_end, - ); - - let (handler_output_builder, _) = channel_dispatch( - &context, - &ChannelMsg::ChannelCloseConfirm(msg_chan_close_confirm), - ) - .unwrap(); - - let handler_output = handler_output_builder.with_result(()); - - assert!(!handler_output.events.is_empty()); // Some events must exist. 
- - for event in handler_output.events.iter() { - assert!(matches!(event, &IbcEvent::CloseConfirmChannel(_))); - assert_eq!(event.height(), context.host_height()); - } - } + use crate::{ + core::ics04_channel::msgs::{ + chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }, + ChannelMsg, + }, + events::IbcEvent, + prelude::*, + }; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State as ChannelState}, + handler::channel_dispatch, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + mock::client_state::MockClientState, + }; + + use crate::{ + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + }; + + #[test] + fn chan_close_confirm_event_height() { + let client_id = ClientId::new(&MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + let default_context = MockContext::::default(); + let client_consensus_state_height = default_context.host_height(); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_close_confirm = MsgChannelCloseConfirm::try_from( + get_dummy_raw_msg_chan_close_confirm(client_consensus_state_height.revision_height), + ) + .unwrap(); + + let chan_end = ChannelEnd::new( + ChannelState::Open, + Order::default(), + Counterparty::new( + msg_chan_close_confirm.port_id.clone(), + Some(msg_chan_close_confirm.channel_id), + ), + vec![conn_id.clone()], + Version::default(), + ); + + let context = default_context + .with_client(&client_id, client_consensus_state_height) + 
.with_connection(conn_id, conn_end) + .with_channel( + msg_chan_close_confirm.port_id.clone(), + msg_chan_close_confirm.channel_id, + chan_end, + ); + + let (handler_output_builder, _) = + channel_dispatch(&context, &ChannelMsg::ChannelCloseConfirm(msg_chan_close_confirm)) + .unwrap(); + + let handler_output = handler_output_builder.with_result(()); + + assert!(!handler_output.events.is_empty()); // Some events must exist. + + for event in handler_output.events.iter() { + assert!(matches!(event, &IbcEvent::CloseConfirmChannel(_))); + assert_eq!(event.height(), context.host_height()); + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_close_init.rs b/modules/src/core/ics04_channel/handler/chan_close_init.rs index 3e503a7753..1809c48b3a 100644 --- a/modules/src/core/ics04_channel/handler/chan_close_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_close_init.rs @@ -1,148 +1,162 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelCloseInit`. -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelCloseInit, + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::State, + error::Error, + events::Attributes, + handler::{ChannelIdState, ChannelResult}, + msgs::chan_close_init::MsgChannelCloseInit, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, +}; + +pub(crate) fn process( + ctx: &Ctx, + 
msg: &MsgChannelCloseInit, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be closed. - if channel_end.state_matches(&State::Closed) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - output.log("success: channel close init "); - - // Transition the channel end to the new state & pick a version. - channel_end.set_state(State::Closed); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::CloseInitChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be closed. 
+ if channel_end.state_matches(&State::Closed) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + output.log("success: channel close init "); + + // Transition the channel end to the new state & pick a version. + channel_end.set_state(State::Closed); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::CloseInitChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::msgs::chan_close_init::test_util::get_dummy_raw_msg_chan_close_init; - use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::events::IbcEvent; - use crate::prelude::*; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as 
ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ - ChannelEnd, Counterparty, Order, State as ChannelState, - }; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn chan_close_init_event_height() { - let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); - let conn_id = ConnectionId::new(2); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_close_init = - MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); - - let chan_end = ChannelEnd::new( - ChannelState::Open, - Order::default(), - Counterparty::new( - msg_chan_close_init.port_id.clone(), - Some(msg_chan_close_init.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let context = { - let default_context = MockContext::default(); - let client_consensus_state_height = default_context.host_height(); - - default_context - .with_client(&client_id, client_consensus_state_height) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_close_init.port_id.clone(), - msg_chan_close_init.channel_id, - chan_end, - ) - }; - - let (handler_output_builder, _) = - channel_dispatch(&context, &ChannelMsg::ChannelCloseInit(msg_chan_close_init)).unwrap(); - let handler_output = handler_output_builder.with_result(()); - - assert!(!handler_output.events.is_empty()); // Some events must exist. 
- - for event in handler_output.events.iter() { - assert!(matches!(event, &IbcEvent::CloseInitChannel(_))); - assert_eq!(event.height(), context.host_height()); - } - } + use crate::{ + core::ics04_channel::msgs::{ + chan_close_init::{test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit}, + ChannelMsg, + }, + events::IbcEvent, + prelude::*, + }; + + use crate::core::{ + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State as ChannelState}, + handler::channel_dispatch, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }; + + use crate::{ + core::ics02_client::context::ClientReader, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + }; + + #[test] + fn chan_close_init_event_height() { + let client_id = ClientId::new(&MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_close_init = + MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); + + let chan_end = ChannelEnd::new( + ChannelState::Open, + Order::default(), + Counterparty::new( + msg_chan_close_init.port_id.clone(), + Some(msg_chan_close_init.channel_id), + ), + vec![conn_id.clone()], + Version::default(), + ); + + let context = { + let default_context = MockContext::::default(); + let client_consensus_state_height = default_context.host_height(); + + default_context + .with_client(&client_id, client_consensus_state_height) + .with_connection(conn_id, conn_end) + .with_channel( + 
msg_chan_close_init.port_id.clone(), + msg_chan_close_init.channel_id, + chan_end, + ) + }; + + let (handler_output_builder, _) = + channel_dispatch(&context, &ChannelMsg::ChannelCloseInit(msg_chan_close_init)).unwrap(); + let handler_output = handler_output_builder.with_result(()); + + assert!(!handler_output.events.is_empty()); // Some events must exist. + + for event in handler_output.events.iter() { + assert!(matches!(event, &IbcEvent::CloseInitChannel(_))); + assert_eq!(event.height(), context.host_height()); + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_ack.rs b/modules/src/core/ics04_channel/handler/chan_open_ack.rs index e883694e3e..cfbe7bb5b4 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_ack.rs @@ -1,290 +1,303 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenAck`. -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelOpenAck, + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_ack::MsgChannelOpenAck, + }, + ics26_routing::context::ReaderContext, + }, + 
events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenAck, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be ack. - if !channel_end.state_matches(&State::Init) && !channel_end.state_matches(&State::TryOpen) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::TryOpen, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - msg.counterparty_version.clone(), - ); - - // set the counterparty channel id to verify against it - channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); - - //2. 
Verify proofs - verify_channel_proofs( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs, - )?; - - output.log("success: channel open ack "); - - // Transition the channel end to the new state & pick a version. - channel_end.set_state(State::Open); - channel_end.set_version(msg.counterparty_version.clone()); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenAckChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be ack. + if !channel_end.state_matches(&State::Init) && !channel_end.state_matches(&State::TryOpen) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. 
+ + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::TryOpen, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + msg.counterparty_version.clone(), + ); + + // set the counterparty channel id to verify against it + channel_end.set_counterparty_channel_id(msg.counterparty_channel_id); + + //2. Verify proofs + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel open ack "); + + // Transition the channel end to the new state & pick a version. + channel_end.set_state(State::Open); + channel_end.set_version(msg.counterparty_version.clone()); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::OpenAckChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use core::str::FromStr; - - use test_log::test; - - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as 
ConnectionState; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::msgs::conn_open_try::test_util::get_dummy_raw_msg_conn_open_try; - use crate::core::ics03_connection::msgs::conn_open_try::MsgConnectionOpenTry; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_ack::test_util::get_dummy_raw_msg_chan_open_ack; - use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics24_host::identifier::ConnectionId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::Height; - - // TODO: The tests here are very fragile and complex. - // Should be adapted to use the same structure as `handler::chan_open_try::tests`. 
- #[test] - fn chan_open_ack_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - let proof_height = 10; - let client_consensus_state_height = 10; - let host_chain_height = Height::new(0, 35); - - let context = MockContext::default(); - - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - msg_conn_init.client_id.clone(), - ConnectionCounterparty::new( - msg_conn_init.counterparty.client_id().clone(), - Some(ConnectionId::from_str("defaultConnection-1").unwrap()), - msg_conn_init.counterparty.prefix().clone(), - ), - get_compatible_versions(), - msg_conn_init.delay_period, - ); - - let ccid = ::from_str("defaultConnection-0"); - let cid = match ccid { - Ok(v) => v, - Err(_e) => ConnectionId::default(), - }; - - let mut connection_vec0 = Vec::new(); - connection_vec0.insert( - 0, - match ::from_str("defaultConnection-0") { - Ok(a) => a, - _ => unreachable!(), - }, - ); - - let msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_consensus_state_height, - host_chain_height.revision_height, - )) - .unwrap(); - - let msg_chan_ack = - MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(proof_height)).unwrap(); - - let msg_chan_try = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); - - let chan_end = ChannelEnd::new( - State::Init, - *msg_chan_try.channel.ordering(), - Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), - connection_vec0.clone(), - msg_chan_try.channel.version().clone(), - ); - - let failed_chan_end = ChannelEnd::new( - State::Open, - *msg_chan_try.channel.ordering(), - Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), - connection_vec0, - msg_chan_try.channel.version().clone(), - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails 
because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails because the channel is in the wrong state".to_string(), - ctx: context - .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - failed_chan_end, - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails because a connection does exist".to_string(), - ctx: context - .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - chan_end.clone(), - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Processing fails due to missing client state ".to_string(), - ctx: context - .clone() - .with_connection(cid.clone(), conn_end.clone()) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - chan_end.clone(), - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context // .clone() - .with_client( - &msg_conn_try.client_id, - Height::new(0, client_consensus_state_height), - ) - .with_connection(cid, conn_end) - .with_channel( - msg_chan_ack.port_id.clone(), - msg_chan_ack.channel_id, - chan_end, - ), - msg: ChannelMsg::ChannelOpenAck(msg_chan_ack), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok((proto_output, res)) => { - assert!( + use core::str::FromStr; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::{ + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + conn_open_try::{ + test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry, + }, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + handler::channel_dispatch, + msgs::{ + chan_open_ack::{ + test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck, + }, + chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::ConnectionId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + Height, + }; + + // TODO: The tests here are very fragile and complex. + // Should be adapted to use the same structure as `handler::chan_open_try::tests`. 
+ #[test] + fn chan_open_ack_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + let proof_height = 10; + let client_consensus_state_height = 10; + let host_chain_height = Height::new(0, 35); + + let context = MockContext::default(); + + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let conn_end = ConnectionEnd::new( + ConnectionState::Open, + msg_conn_init.client_id.clone(), + ConnectionCounterparty::new( + msg_conn_init.counterparty.client_id().clone(), + Some(ConnectionId::from_str("defaultConnection-1").unwrap()), + msg_conn_init.counterparty.prefix().clone(), + ), + get_compatible_versions(), + msg_conn_init.delay_period, + ); + + let ccid = ::from_str("defaultConnection-0"); + let cid = match ccid { + Ok(v) => v, + Err(_e) => ConnectionId::default(), + }; + + let mut connection_vec0 = Vec::new(); + connection_vec0.insert( + 0, + match ::from_str("defaultConnection-0") { + Ok(a) => a, + _ => unreachable!(), + }, + ); + + let msg_conn_try = MsgConnectionOpenTry::>::try_from( + get_dummy_raw_msg_conn_open_try( + client_consensus_state_height, + host_chain_height.revision_height, + ), + ) + .unwrap(); + + let msg_chan_ack = + MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(proof_height)).unwrap(); + + let msg_chan_try = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); + + let chan_end = ChannelEnd::new( + State::Init, + *msg_chan_try.channel.ordering(), + Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), + connection_vec0.clone(), + msg_chan_try.channel.version().clone(), + ); + + let failed_chan_end = ChannelEnd::new( + State::Open, + *msg_chan_try.channel.ordering(), + Counterparty::new(msg_chan_ack.port_id.clone(), Some(msg_chan_ack.channel_id)), + connection_vec0, + msg_chan_try.channel.version().clone(), + ); + + let tests: Vec = vec![ + Test { + name: 
"Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails because the channel is in the wrong state".to_string(), + ctx: context + .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + failed_chan_end, + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails because a connection does exist".to_string(), + ctx: context + .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + chan_end.clone(), + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Processing fails due to missing client state ".to_string(), + ctx: context.clone().with_connection(cid.clone(), conn_end.clone()).with_channel( + msg_chan_ack.port_id.clone(), + msg_chan_ack.channel_id, + chan_end.clone(), + ), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack.clone()), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context // .clone() + .with_client( + &msg_conn_try.client_id, + Height::new(0, client_consensus_state_height), + ) + .with_connection(cid, conn_end) + .with_channel(msg_chan_ack.port_id.clone(), msg_chan_ack.channel_id, chan_end), + msg: ChannelMsg::ChannelOpenAck(msg_chan_ack), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. 
+ match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -292,29 +305,29 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. - //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); - assert_eq!(res.channel_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenAckChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. + //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); + assert_eq!(res.channel_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenAckChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs index 23420c4fa5..0dbc543673 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_confirm.rs @@ -1,192 +1,206 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenConfirm`. 
-use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelOpenConfirm, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end and validate it against the message. - let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; - - // Validate that the channel end is in a state where it can be confirmed. - if !channel_end.state_matches(&State::TryOpen) { - return Err(Error::invalid_channel_state( - msg.channel_id, - channel_end.state, - )); - } - - // An OPEN IBC connection running on the local (host) chain should exist. - if channel_end.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - channel_end.connection_hops().len(), - )); - } - - let conn = ctx.connection_end(&channel_end.connection_hops()[0])?; - - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - channel_end.connection_hops()[0].clone(), - )); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. 
- - let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); - - let connection_counterparty = conn.counterparty(); - let ccid = connection_counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Open, - *channel_end.ordering(), - expected_counterparty, - expected_connection_hops, - channel_end.version().clone(), - ); - //2. Verify proofs - verify_channel_proofs( - ctx, - msg.proofs.height(), - &channel_end, - &conn, - &expected_channel_end, - &msg.proofs, - ) - .map_err(Error::chan_open_confirm_proof_verification)?; - - output.log("success: channel open confirm "); - - // Transition the channel end to the new state. - channel_end.set_state(State::Open); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: msg.channel_id, - channel_id_state: ChannelIdState::Reused, - channel_end, - }; - - let event_attributes = Attributes { - channel_id: Some(msg.channel_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenConfirmChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_confirm::MsgChannelOpenConfirm, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenConfirm, +) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end and 
validate it against the message. + let mut channel_end = ctx.channel_end(&(msg.port_id.clone(), msg.channel_id))?; + + // Validate that the channel end is in a state where it can be confirmed. + if !channel_end.state_matches(&State::TryOpen) { + return Err(Error::invalid_channel_state(msg.channel_id, channel_end.state)) + } + + // An OPEN IBC connection running on the local (host) chain should exist. + if channel_end.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, channel_end.connection_hops().len())) + } + + let conn = ctx + .connection_end(&channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(channel_end.connection_hops()[0].clone())) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + + let expected_counterparty = Counterparty::new(msg.port_id.clone(), Some(msg.channel_id)); + + let connection_counterparty = conn.counterparty(); + let ccid = connection_counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Open, + *channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + channel_end.version().clone(), + ); + //2. Verify proofs + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + ) + .map_err(Error::chan_open_confirm_proof_verification)?; + + output.log("success: channel open confirm "); + + // Transition the channel end to the new state. 
+ channel_end.set_state(State::Open); + + let event_attributes = Attributes { + channel_id: Some(msg.channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: channel_end.connection_hops[0].clone(), + counterparty_port_id: channel_end.counterparty().port_id.clone(), + counterparty_channel_id: channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: msg.channel_id, + channel_id_state: ChannelIdState::Reused, + channel_end, + }; + + output.emit(IbcEvent::OpenConfirmChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::context::ConnectionReader; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_confirm::test_util::get_dummy_raw_msg_chan_open_confirm; - use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - use crate::Height; - - // TODO: The tests here 
should use the same structure as `handler::chan_open_try::tests`. - #[test] - fn chan_open_confirm_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - let client_id = ClientId::new(ClientType::Mock, 24).unwrap(); - let conn_id = ConnectionId::new(2); - let context = MockContext::default(); - let client_consensus_state_height = context.host_current_height().revision_height; - - // The connection underlying the channel we're trying to open. - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - let msg_chan_confirm = MsgChannelOpenConfirm::try_from( - get_dummy_raw_msg_chan_open_confirm(client_consensus_state_height), - ) - .unwrap(); - - let chan_end = ChannelEnd::new( - State::TryOpen, - Order::default(), - Counterparty::new( - msg_chan_confirm.port_id.clone(), - Some(msg_chan_confirm.channel_id), - ), - vec![conn_id.clone()], - Version::default(), - ); - - let tests: Vec = vec![Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&client_id, Height::new(0, client_consensus_state_height)) - .with_connection(conn_id, conn_end) - .with_channel( - msg_chan_confirm.port_id.clone(), - msg_chan_confirm.channel_id, - chan_end, - ), - msg: ChannelMsg::ChannelOpenConfirm(msg_chan_confirm), - want_pass: true, - }] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::channel_dispatch, + msgs::{ + chan_open_confirm::{ + test_util::get_dummy_raw_msg_chan_open_confirm, MsgChannelOpenConfirm, + }, + ChannelMsg, + }, + Version, + }, + ics24_host::identifier::{ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + Height, + }; + + // TODO: The tests here should use the same structure as `handler::chan_open_try::tests`. + #[test] + fn chan_open_confirm_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + let client_id = ClientId::new(&MockClientState::client_type(), 24).unwrap(); + let conn_id = ConnectionId::new(2); + let context = MockContext::default(); + let client_consensus_state_height = context.host_height().revision_height; + + // The connection underlying the channel we're trying to open. 
+ let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + let msg_chan_confirm = MsgChannelOpenConfirm::try_from( + get_dummy_raw_msg_chan_open_confirm(client_consensus_state_height), + ) + .unwrap(); + + let chan_end = ChannelEnd::new( + State::TryOpen, + Order::default(), + Counterparty::new(msg_chan_confirm.port_id.clone(), Some(msg_chan_confirm.channel_id)), + vec![conn_id.clone()], + Version::default(), + ); + + let tests: Vec = vec![Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&client_id, Height::new(0, client_consensus_state_height)) + .with_connection(conn_id, conn_end) + .with_channel( + msg_chan_confirm.port_id.clone(), + msg_chan_confirm.channel_id, + chan_end, + ), + msg: ChannelMsg::ChannelOpenConfirm(msg_chan_confirm), + want_pass: true, + }] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_confirm: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -194,29 +208,29 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ConnectionEnd, should have init state. 
- //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); - assert_eq!(res.channel_end.state().clone(), State::Open); - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenConfirmChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_ack: did not pass test: {}, \nparams {:?} {:?}\nerror: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ConnectionEnd, should have init state. + //assert_eq!(res.channel_id, msg_chan_init.channel_id().clone()); + assert_eq!(res.channel_end.state().clone(), State::Open); + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenConfirmChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_ack: did not pass test: {}, \nparams {:?} {:?}\nerror: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_init.rs b/modules/src/core/ics04_channel/handler/chan_open_init.rs index 9a7bff9ff7..8788459581 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_init.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_init.rs @@ -1,153 +1,168 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenInit`. 
-use crate::core::ics04_channel::channel::{ChannelEnd, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use crate::core::ics24_host::identifier::ChannelId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelOpenInit, +use crate::{ + core::{ + ics04_channel::{ + channel::{ChannelEnd, State}, + error::Error, + events::Attributes, + handler::{ChannelIdState, ChannelResult}, + msgs::chan_open_init::MsgChannelOpenInit, + }, + ics24_host::identifier::ChannelId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenInit, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - if msg.channel.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - msg.channel.connection_hops().len(), - )); - } - - // An IBC connection running on the local (host) chain should exist. - let conn = ctx.connection_end(&msg.channel.connection_hops()[0])?; - let get_versions = conn.versions(); - let version = match get_versions { - [version] => version, - _ => return Err(Error::invalid_version_length_connection()), - }; - - let channel_feature = msg.channel.ordering().to_string(); - if !version.is_supported_feature(channel_feature) { - return Err(Error::channel_feature_not_suported_by_connection()); - } - - // Channel identifier construction. 
- let id_counter = ctx.channel_counter()?; - let chan_id = ChannelId::new(id_counter); - - output.log(format!( - "success: generated new channel identifier: {}", - chan_id - )); - - let new_channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.channel.version().clone(), - ); - - output.log("success: no channel found"); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id: chan_id, - channel_end: new_channel_end, - channel_id_state: ChannelIdState::Generated, - }; - - let event_attributes = Attributes { - channel_id: Some(chan_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenInitChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + if msg.channel.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, msg.channel.connection_hops().len())) + } + + // An IBC connection running on the local (host) chain should exist. + let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + let get_versions = conn.versions(); + let version = match get_versions { + [version] => version, + _ => return Err(Error::invalid_version_length_connection()), + }; + + let channel_feature = msg.channel.ordering().to_string(); + if !version.is_supported_feature(channel_feature) { + return Err(Error::channel_feature_not_suported_by_connection()) + } + + // Channel identifier construction. 
+ let id_counter = ctx.channel_counter()?; + let chan_id = ChannelId::new(id_counter); + + output.log(format!("success: generated new channel identifier: {}", chan_id)); + + let new_channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.channel.version().clone(), + ); + + output.log("success: no channel found"); + + let event_attributes = Attributes { + channel_id: Some(chan_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id: chan_id, + channel_end: new_channel_end, + channel_id_state: ChannelIdState::Generated, + }; + + output.emit(IbcEvent::OpenInitChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::msgs::conn_open_init::test_util::get_dummy_raw_msg_conn_open_init; - use crate::core::ics03_connection::msgs::conn_open_init::MsgConnectionOpenInit; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::State; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init; - use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; - use crate::core::ics04_channel::msgs::ChannelMsg; - use 
crate::core::ics24_host::identifier::ConnectionId; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - - #[test] - fn chan_open_init_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - } - - let msg_chan_init = - MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); - - let context = MockContext::default(); - - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let init_conn_end = ConnectionEnd::new( - ConnectionState::Init, - msg_conn_init.client_id.clone(), - msg_conn_init.counterparty.clone(), - get_compatible_versions(), - msg_conn_init.delay_period, - ); - - let cid = ConnectionId::default(); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no connection exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context.with_connection(cid, init_conn_end), - msg: ChannelMsg::ChannelOpenInit(msg_chan_init), - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ConnectionEnd, State as ConnectionState}, + msgs::conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::State, + handler::channel_dispatch, + msgs::{ + chan_open_init::{ + test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::ConnectionId, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + }; + + #[test] + fn chan_open_init_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + } + + let msg_chan_init = + MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); + + let context = MockContext::default(); + + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let init_conn_end = ConnectionEnd::new( + ConnectionState::Init, + msg_conn_init.client_id.clone(), + msg_conn_init.counterparty.clone(), + get_compatible_versions(), + msg_conn_init.delay_period, + ); + + let cid = ConnectionId::default(); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no connection exists in the context".to_string(), + ctx: context.clone(), + msg: ChannelMsg::ChannelOpenInit(msg_chan_init.clone()), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context.with_connection(cid, init_conn_end), + msg: ChannelMsg::ChannelOpenInit(msg_chan_init), + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. 
+ match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_init: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -155,33 +170,33 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // The object in the output is a ChannelEnd, should have init state. - assert_eq!(res.channel_end.state().clone(), State::Init); - let msg_init = test.msg; - - if let ChannelMsg::ChannelOpenInit(msg_init) = msg_init { - assert_eq!(res.port_id.clone(), msg_init.port_id.clone()); - } - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenInitChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "chan_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg, - test.ctx.clone(), - e, - ); - } - } - } - } + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // The object in the output is a ChannelEnd, should have init state. 
+ assert_eq!(res.channel_end.state().clone(), State::Init); + let msg_init = test.msg; + + if let ChannelMsg::ChannelOpenInit(msg_init) = msg_init { + assert_eq!(res.port_id.clone(), msg_init.port_id.clone()); + } + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenInitChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "chan_open_init: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg, + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/chan_open_try.rs b/modules/src/core/ics04_channel/handler/chan_open_try.rs index 126a7ad8e9..365c32a39c 100644 --- a/modules/src/core/ics04_channel/handler/chan_open_try.rs +++ b/modules/src/core/ics04_channel/handler/chan_open_try.rs @@ -1,395 +1,300 @@ //! Protocol logic specific to ICS4 messages of type `MsgChannelOpenTry`. -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::Attributes; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::{ChannelIdState, ChannelResult}; -use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::core::ics24_host::identifier::ChannelId; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; - -pub(crate) fn process( - ctx: &dyn ChannelReader, - msg: &MsgChannelOpenTry, -) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - // Unwrap the old channel end (if any) and validate it against the message. 
- let (mut new_channel_end, channel_id) = match &msg.previous_channel_id { - Some(prev_id) => { - let old_channel_end = ctx.channel_end(&(msg.port_id.clone(), *prev_id))?; - - // Validate that existing channel end matches with the one we're trying to establish. - if old_channel_end.state_matches(&State::Init) - && old_channel_end.order_matches(msg.channel.ordering()) - && old_channel_end.connection_hops_matches(msg.channel.connection_hops()) - && old_channel_end.counterparty_matches(msg.channel.counterparty()) - && old_channel_end.version_matches(msg.channel.version()) - { - // A ChannelEnd already exists and all validation passed. - Ok((old_channel_end, *prev_id)) - } else { - // A ConnectionEnd already exists and validation failed. - Err(Error::channel_mismatch(*prev_id)) - } - } - // No previous channel id was supplied. Create a new channel end & an identifier. - None => { - let channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.counterparty_version.clone(), - ); - - // Channel identifier construction. - let id_counter = ctx.channel_counter()?; - let chan_id = ChannelId::new(id_counter); - - output.log(format!( - "success: generated new channel identifier: {}", - chan_id - )); - - Ok((channel_end, chan_id)) - } - }?; - - // An IBC connection running on the local (host) chain should exist. 
- if msg.channel.connection_hops().len() != 1 { - return Err(Error::invalid_connection_hops_length( - 1, - msg.channel.connection_hops().len(), - )); - } - - let conn = ctx.connection_end(&msg.channel.connection_hops()[0])?; - if !conn.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - msg.channel.connection_hops()[0].clone(), - )); - } - - let get_versions = conn.versions(); - let version = match get_versions { - [version] => version, - _ => return Err(Error::invalid_version_length_connection()), - }; - - let channel_feature = msg.channel.ordering().to_string(); - if !version.is_supported_feature(channel_feature) { - return Err(Error::channel_feature_not_suported_by_connection()); - } - - // Proof verification in two steps: - // 1. Setup: build the Channel as we expect to find it on the other party. - // the port should be identical with the port we're using; the channel id should not be set - // since the counterparty cannot know yet which ID did we choose. - let expected_counterparty = Counterparty::new(msg.port_id.clone(), None); - let counterparty = conn.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(msg.channel.connection_hops()[0].clone()) - })?; - let expected_connection_hops = vec![ccid.clone()]; - - // The other party should be storing a channel end in this configuration. - let expected_channel_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - expected_counterparty, - expected_connection_hops, - msg.counterparty_version.clone(), - ); - - // 2. Actual proofs are verified now. - verify_channel_proofs( - ctx, - msg.proofs.height(), - &new_channel_end, - &conn, - &expected_channel_end, - &msg.proofs, - )?; - - output.log("success: channel open try "); - - // Transition the channel end to the new state & pick a version. 
- new_channel_end.set_state(State::TryOpen); - - let result = ChannelResult { - port_id: msg.port_id.clone(), - channel_id_state: if matches!(msg.previous_channel_id, None) { - ChannelIdState::Generated - } else { - ChannelIdState::Reused - }, - channel_id, - channel_end: new_channel_end, - }; - - let event_attributes = Attributes { - channel_id: Some(channel_id), - height: ctx.host_height(), - ..Default::default() - }; - output.emit(IbcEvent::OpenTryChannel( - event_attributes - .try_into() - .map_err(|_| Error::missing_channel_id())?, - )); - - Ok(output.with_result(result)) +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, State}, + error::Error, + events::Attributes, + handler::{verify::verify_channel_proofs, ChannelIdState, ChannelResult}, + msgs::chan_open_try::MsgChannelOpenTry, + }, + ics24_host::identifier::ChannelId, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, +}; + +pub(crate) fn process( + ctx: &Ctx, + msg: &MsgChannelOpenTry, +) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + // Unwrap the old channel end (if any) and validate it against the message. + let (mut new_channel_end, channel_id) = { + let channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.counterparty_version.clone(), + ); + + // Channel identifier construction. + let id_counter = ctx.channel_counter()?; + let chan_id = ChannelId::new(id_counter); + + output.log(format!("success: generated new channel identifier: {}", chan_id)); + + (channel_end, chan_id) + }; + + // An IBC connection running on the local (host) chain should exist. 
+ if msg.channel.connection_hops().len() != 1 { + return Err(Error::invalid_connection_hops_length(1, msg.channel.connection_hops().len())) + } + + let conn = ctx + .connection_end(&msg.channel.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + if !conn.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(msg.channel.connection_hops()[0].clone())) + } + + let get_versions = conn.versions(); + let version = match get_versions { + [version] => version, + _ => return Err(Error::invalid_version_length_connection()), + }; + + let channel_feature = msg.channel.ordering().to_string(); + if !version.is_supported_feature(channel_feature) { + return Err(Error::channel_feature_not_suported_by_connection()) + } + + // Proof verification in two steps: + // 1. Setup: build the Channel as we expect to find it on the other party. + // the port should be identical with the port we're using; the channel id should not be set + // since the counterparty cannot know yet which ID did we choose. + let expected_counterparty = Counterparty::new(msg.port_id.clone(), None); + let counterparty = conn.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(msg.channel.connection_hops()[0].clone()) + })?; + let expected_connection_hops = vec![ccid.clone()]; + + // The other party should be storing a channel end in this configuration. + let expected_channel_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + expected_counterparty, + expected_connection_hops, + msg.counterparty_version.clone(), + ); + + // 2. Actual proofs are verified now. + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &new_channel_end, + &conn, + &expected_channel_end, + &msg.proofs.object_proof(), + )?; + + output.log("success: channel open try "); + + // Transition the channel end to the new state & pick a version. 
+ new_channel_end.set_state(State::TryOpen); + + let event_attributes = Attributes { + channel_id: Some(channel_id), + height: ctx.host_height(), + port_id: msg.port_id.clone(), + connection_id: new_channel_end.connection_hops[0].clone(), + counterparty_port_id: new_channel_end.counterparty().port_id.clone(), + counterparty_channel_id: new_channel_end.counterparty().channel_id.clone(), + }; + + let result = ChannelResult { + port_id: msg.port_id.clone(), + channel_id_state: ChannelIdState::Generated, + channel_id, + channel_end: new_channel_end, + }; + + output.emit(IbcEvent::OpenTryChannel( + event_attributes.try_into().map_err(|_| Error::missing_channel_id())?, + )); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::error as ics02_error; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::error as ics03_error; - use crate::core::ics03_connection::msgs::test_util::get_dummy_raw_counterparty; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::channel_dispatch; - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::core::ics04_channel::msgs::ChannelMsg; - use crate::core::ics04_channel::{error, Version}; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - 
use crate::Height; - - #[test] - fn chan_open_try_msg_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: ChannelMsg, - want_pass: bool, - match_error: Box, - } - - // Some general-purpose variable to parametrize the messages and the context. - let proof_height = 10; - let conn_id = ConnectionId::new(2); - let client_id = ClientId::new(ClientType::Mock, 45).unwrap(); - - // The context. We'll reuse this same one across all tests. - let context = MockContext::default(); - - // This is the connection underlying the channel we're trying to open. - let conn_end = ConnectionEnd::new( - ConnectionState::Open, - client_id.clone(), - ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), - get_compatible_versions(), - ZERO_DURATION, - ); - - // We're going to test message processing against this message. - let mut msg = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); - - // Assumption: an already existing `Init` channel should exist in the context for `msg`, and - // this channel should depend on connection `conn_id`. - let chan_id = ChannelId::new(24); - let hops = vec![conn_id.clone()]; - msg.previous_channel_id = Some(chan_id); - msg.channel.connection_hops = hops; - - // This message does not assume a channel should already be initialized. - let mut msg_vanilla = msg.clone(); - msg_vanilla.previous_channel_id = None; - - // A preloaded channel end that resides in the context. This is constructed so as to be - // consistent with the incoming ChanOpenTry message `msg`. - let correct_chan_end = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - msg.channel.version().clone(), - ); - - // A preloaded channel end that resides in the context. This is constructed so as to be - // __inconsistent__ with the incoming ChanOpenTry message `msg` due to its version field. 
- let version = Version::from(format!("{}-", msg.channel.version())); - let incorrect_chan_end_ver = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - msg.channel.connection_hops().clone(), - version, - ); - - // A preloaded channel end residing in the context, which will be __inconsistent__ with - // the incoming ChanOpenTry message `msg` due to its connection hops field. - let hops = vec![ConnectionId::new(9890)]; - let incorrect_chan_end_hops = ChannelEnd::new( - State::Init, - *msg.channel.ordering(), - msg.channel.counterparty().clone(), - hops, - msg.channel.version().clone(), - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel is preloaded in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let port_id = msg.port_id.clone(); - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelNotFound(e) => { - assert_eq!(e.port_id, port_id); - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelNotFound, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails because no connection exists in the context".to_string(), - ctx: context.clone(), - msg: ChannelMsg::ChannelOpenTry(msg_vanilla.clone()), - want_pass: false, - match_error: { - let connection_id = msg.channel.connection_hops()[0].clone(); - Box::new(move |e| match e { - error::ErrorDetail::Ics03Connection(e) => { - assert_eq!( - e.source, - ics03_error::ErrorDetail::ConnectionNotFound( - ics03_error::ConnectionNotFoundSubdetail { connection_id } - ) - ); - } - _ => { - panic!("Expected MissingConnection, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails because of inconsistent version with preexisting channel" - .to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, 
incorrect_chan_end_ver), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelMismatch(e) => { - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelMismatch, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails because of inconsistent connection hops".to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, incorrect_chan_end_hops), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: { - let channel_id = chan_id; - Box::new(move |e| match e { - error::ErrorDetail::ChannelMismatch(e) => { - assert_eq!(e.channel_id, channel_id); - } - _ => { - panic!("Expected ChannelMismatch, instead got {}", e) - } - }) - }, - }, - Test { - name: "Processing fails b/c the context has no client state".to_string(), - ctx: context - .clone() - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, correct_chan_end.clone()), - msg: ChannelMsg::ChannelOpenTry(msg.clone()), - want_pass: false, - match_error: Box::new(|e| match e { - error::ErrorDetail::Ics03Connection(e) => { - assert_eq!( - e.source, - ics03_error::ErrorDetail::Ics02Client( - ics03_error::Ics02ClientSubdetail { - source: ics02_error::ErrorDetail::ClientNotFound( - ics02_error::ClientNotFoundSubdetail { - client_id: ClientId::new(ClientType::Mock, 45).unwrap() - } - ) - } - ) - ); - } - _ => { - panic!("Expected MissingClientState, instead got {}", e) - } - }), - }, - Test { - name: "Processing is successful".to_string(), - ctx: context - .clone() - .with_client(&client_id, Height::new(0, proof_height)) - .with_connection(conn_id.clone(), conn_end.clone()) - .with_channel(msg.port_id.clone(), chan_id, correct_chan_end), - msg: ChannelMsg::ChannelOpenTry(msg), - want_pass: true, - match_error: Box::new(|_| 
{}), - }, - Test { - name: "Processing is successful against an empty context (no preexisting channel)" - .to_string(), - ctx: context - .with_client(&client_id, Height::new(0, proof_height)) - .with_connection(conn_id, conn_end), - msg: ChannelMsg::ChannelOpenTry(msg_vanilla), - want_pass: true, - match_error: Box::new(|_| {}), - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = channel_dispatch(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok((proto_output, res)) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, error as ics02_error}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + error as ics03_error, + msgs::test_util::get_dummy_raw_counterparty, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, State}, + error, + handler::channel_dispatch, + msgs::{ + chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + ChannelMsg, + }, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId}, + }, + events::IbcEvent, + mock::{ + client_state::MockClientState, + context::{MockClientTypes, MockContext}, + }, + timestamp::ZERO_DURATION, + Height, + }; + + #[test] + fn chan_open_try_msg_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: ChannelMsg, + want_pass: bool, + match_error: Box, + } + + // Some general-purpose variable to parametrize the messages and the context. + let proof_height = 10; + let conn_id = ConnectionId::new(2); + let client_id = ClientId::new(&MockClientState::client_type(), 45).unwrap(); + + // The context. We'll reuse this same one across all tests. + let context = MockContext::default(); + + // This is the connection underlying the channel we're trying to open. 
+ let conn_end = ConnectionEnd::new( + ConnectionState::Open, + client_id.clone(), + ConnectionCounterparty::try_from(get_dummy_raw_counterparty()).unwrap(), + get_compatible_versions(), + ZERO_DURATION, + ); + + // We're going to test message processing against this message. + let mut msg = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(proof_height)).unwrap(); + + // Assumption: an already existing `Init` channel should exist in the context for `msg`, and + // this channel should depend on connection `conn_id`. + let chan_id = ChannelId::new(24); + let hops = vec![conn_id.clone()]; + msg.channel.connection_hops = hops; + + // This message does not assume a channel should already be initialized. + let msg_vanilla = msg.clone(); + + // A preloaded channel end that resides in the context. This is constructed so as to be + // consistent with the incoming ChanOpenTry message `msg`. + let correct_chan_end = ChannelEnd::new( + State::Init, + *msg.channel.ordering(), + msg.channel.counterparty().clone(), + msg.channel.connection_hops().clone(), + msg.channel.version().clone(), + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no connection exists in the context".to_string(), + ctx: context.clone(), + msg: ChannelMsg::ChannelOpenTry(msg_vanilla.clone()), + want_pass: false, + match_error: { + let connection_id = msg.channel.connection_hops()[0].clone(); + Box::new(move |e| match e { + error::ErrorDetail::Ics03Connection(e) => { + assert_eq!( + e.source, + ics03_error::ErrorDetail::ConnectionNotFound( + ics03_error::ConnectionNotFoundSubdetail { connection_id } + ) + ); + }, + _ => { + panic!("Expected MissingConnection, instead got {}", e) + }, + }) + }, + }, + Test { + name: "Processing fails b/c the context has no client state".to_string(), + ctx: context + .clone() + .with_connection(conn_id.clone(), conn_end.clone()) + .with_channel(msg.port_id.clone(), chan_id, correct_chan_end.clone()), + msg: 
ChannelMsg::ChannelOpenTry(msg.clone()), + want_pass: false, + match_error: Box::new(|e| match e { + error::ErrorDetail::Ics02Client(e) => { + assert_eq!( + e.source, + ics02_error::ErrorDetail::ClientNotFound( + ics02_error::ClientNotFoundSubdetail { + client_id: ClientId::new(&MockClientState::client_type(), 45) + .unwrap() + } + ) + ); + }, + _ => { + panic!("Expected MissingClientState, instead got {}", e) + }, + }), + }, + Test { + name: "Processing is successful".to_string(), + ctx: context + .clone() + .with_client(&client_id, Height::new(0, proof_height)) + .with_connection(conn_id.clone(), conn_end.clone()) + .with_channel(msg.port_id.clone(), chan_id, correct_chan_end), + msg: ChannelMsg::ChannelOpenTry(msg), + want_pass: true, + match_error: Box::new(|_| {}), + }, + Test { + name: "Processing is successful against an empty context (no preexisting channel)" + .to_string(), + ctx: context + .with_client(&client_id, Height::new(0, proof_height)) + .with_connection(conn_id, conn_end), + msg: ChannelMsg::ChannelOpenTry(msg_vanilla), + want_pass: true, + match_error: Box::new(|_| {}), + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = channel_dispatch(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok((proto_output, res)) => { + assert!( test.want_pass, "chan_open_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -397,19 +302,19 @@ mod tests { test.ctx.clone() ); - let proto_output = proto_output.with_result(()); - assert!(!proto_output.events.is_empty()); // Some events must exist. + let proto_output = proto_output.with_result(()); + assert!(!proto_output.events.is_empty()); // Some events must exist. - // The object in the output is a channel end, should have TryOpen state. - assert_eq!(res.channel_end.state().clone(), State::TryOpen); + // The object in the output is a channel end, should have TryOpen state. 
+ assert_eq!(res.channel_end.state().clone(), State::TryOpen); - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::OpenTryChannel(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::OpenTryChannel(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( !test.want_pass, "chan_open_try: did not pass test: {}, \nparams:\n\tmsg={:?}\n\tcontext={:?}\nerror: {:?}", test.name, @@ -418,9 +323,9 @@ mod tests { e, ); - (test.match_error)(e.0); - } - } - } - } + (test.match_error)(e.0); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/recv_packet.rs b/modules/src/core/ics04_channel/handler/recv_packet.rs index 8352f7e245..48685e777c 100644 --- a/modules/src/core/ics04_channel/handler/recv_packet.rs +++ b/modules/src/core/ics04_channel/handler/recv_packet.rs @@ -1,281 +1,293 @@ -use crate::core::ics03_connection::connection::State as ConnectionState; -use crate::core::ics04_channel::channel::{Counterparty, Order, State}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::events::ReceivePacket; -use crate::core::ics04_channel::handler::verify::verify_packet_recv_proofs; -use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::core::ics04_channel::packet::{PacketResult, Receipt, Sequence}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::timestamp::Expiry; -use crate::Height; - -#[derive(Clone, Debug)] -pub struct RecvPacketSuccess { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Sequence, - pub receipt: Option, -} +use crate::{ + core::{ + ics03_connection::connection::State as ConnectionState, + ics04_channel::{ + channel::{Counterparty, 
Order, State}, + error::Error, + events::ReceivePacket, + handler::verify::verify_packet_recv_proofs, + msgs::recv_packet::MsgRecvPacket, + packet::{Packet, PacketResult, Receipt, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Expiry, + Height, +}; +use core::fmt::Debug; #[derive(Clone, Debug)] pub enum RecvPacketResult { - Success(RecvPacketSuccess), - NoOp, + NoOp, + Unordered { + port_id: PortId, + channel_id: ChannelId, + sequence: Sequence, + receipt: Receipt, + packet: Packet, + }, + Ordered { + port_id: PortId, + channel_id: ChannelId, + next_seq_recv: Sequence, + packet: Packet, + }, } -pub fn process(ctx: &dyn ChannelReader, msg: &MsgRecvPacket) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let dest_channel_end = - ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; - - if !dest_channel_end.state_matches(&State::Open) { - return Err(Error::invalid_channel_state( - packet.source_channel, - dest_channel_end.state, - )); - } - - let counterparty = Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); - - if !dest_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.source_port.clone(), - packet.source_channel, - )); - } - - let connection_end = ctx.connection_end(&dest_channel_end.connection_hops()[0])?; - - if !connection_end.state_matches(&ConnectionState::Open) { - return Err(Error::connection_not_open( - dest_channel_end.connection_hops()[0].clone(), - )); - } - - let latest_height = ctx.host_height(); - if (!packet.timeout_height.is_zero()) && (packet.timeout_height <= latest_height) { - return Err(Error::low_packet_height( - latest_height, - packet.timeout_height, - )); - } - - let latest_timestamp = ctx.host_timestamp(); - if let Expiry::Expired = 
latest_timestamp.check_expiry(&packet.timeout_timestamp) { - return Err(Error::low_packet_timestamp()); - } - - verify_packet_recv_proofs( - ctx, - msg.proofs.height(), - packet, - &connection_end, - &msg.proofs, - )?; - - let result = if dest_channel_end.order_matches(&Order::Ordered) { - let next_seq_recv = - ctx.get_next_sequence_recv(&(packet.source_port.clone(), packet.source_channel))?; - - if packet.sequence < next_seq_recv { - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: Height::zero(), - packet: msg.packet.clone(), - })); - return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))); - } else if packet.sequence != next_seq_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_recv, - )); - } - - PacketResult::Recv(RecvPacketResult::Success(RecvPacketSuccess { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: next_seq_recv.increment(), - receipt: None, - })) - } else { - let packet_rec = ctx.get_packet_receipt(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - )); - - match packet_rec { - Ok(_receipt) => { - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: Height::zero(), - packet: msg.packet.clone(), - })); - return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))); - } - Err(e) if e.detail() == Error::packet_receipt_not_found(packet.sequence).detail() => { - // store a receipt that does not contain any data - PacketResult::Recv(RecvPacketResult::Success(RecvPacketSuccess { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: 1.into(), - receipt: Some(Receipt::Ok), - })) - } - Err(_) => return Err(Error::implementation_specific()), - } - }; - - output.log("success: packet receive"); - - output.emit(IbcEvent::ReceivePacket(ReceivePacket { - height: ctx.host_height(), - packet: msg.packet.clone(), - })); - - 
Ok(output.with_result(result)) +pub fn process( + ctx: &Ctx, + msg: &MsgRecvPacket, +) -> HandlerResult { + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let dest_channel_end = + ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; + + if !dest_channel_end.state_matches(&State::Open) { + return Err(Error::invalid_channel_state(packet.source_channel, dest_channel_end.state)) + } + + let counterparty = Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); + + if !dest_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.source_port.clone(), + packet.source_channel, + )) + } + + let connection_end = ctx + .connection_end(&dest_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + if !connection_end.state_matches(&ConnectionState::Open) { + return Err(Error::connection_not_open(dest_channel_end.connection_hops()[0].clone())) + } + + let latest_height = ctx.host_height(); + if (!packet.timeout_height.is_zero()) && (packet.timeout_height <= latest_height) { + return Err(Error::low_packet_height(latest_height, packet.timeout_height)) + } + + let latest_timestamp = ctx.host_timestamp(); + if let Expiry::Expired = latest_timestamp.check_expiry(&packet.timeout_timestamp) { + return Err(Error::low_packet_timestamp()) + } + + verify_packet_recv_proofs::( + ctx, + msg.proofs.height(), + packet, + &connection_end, + &msg.proofs, + )?; + + let result = if dest_channel_end.order_matches(&Order::Ordered) { + let next_seq_recv = ctx.get_next_sequence_recv(&( + packet.destination_port.clone(), + packet.destination_channel, + ))?; + + if packet.sequence < next_seq_recv { + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: Height::zero(), + packet: msg.packet.clone(), + })); + return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))) + } else if packet.sequence != next_seq_recv { + return 
Err(Error::invalid_packet_sequence(packet.sequence, next_seq_recv)) + } + + PacketResult::Recv(RecvPacketResult::Ordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + next_seq_recv: next_seq_recv.increment(), + packet: packet.clone(), + }) + } else { + let packet_rec = ctx.get_packet_receipt(&( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + )); + + match packet_rec { + Ok(_receipt) => { + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: Height::zero(), + packet: msg.packet.clone(), + })); + return Ok(output.with_result(PacketResult::Recv(RecvPacketResult::NoOp))) + }, + Err(e) if e.detail() == Error::packet_receipt_not_found(packet.sequence).detail() => { + // store a receipt that does not contain any data + PacketResult::Recv(RecvPacketResult::Unordered { + port_id: packet.destination_port.clone(), + channel_id: packet.destination_channel, + sequence: packet.sequence, + receipt: Receipt::Ok, + packet: packet.clone(), + }) + }, + Err(e) => return Err(Error::implementation_specific(e.to_string())), + } + }; + + output.log("success: packet receive"); + + output.emit(IbcEvent::ReceivePacket(ReceivePacket { + height: ctx.host_height(), + packet: msg.packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::handler::recv_packet::process; - use crate::core::ics04_channel::msgs::recv_packet::test_util::get_dummy_raw_msg_recv_packet; - 
use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::mock::context::MockContext; - use crate::relayer::ics18_relayer::context::Ics18Context; - use crate::test_utils::get_dummy_account_id; - use crate::timestamp::Timestamp; - use crate::timestamp::ZERO_DURATION; - use crate::{core::ics04_channel::packet::Packet, events::IbcEvent}; - - #[test] - fn recv_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgRecvPacket, - want_pass: bool, - } - - let context = MockContext::default(); - - let host_height = context.query_latest_height().increment(); - - let client_height = host_height.increment(); - - let msg = - MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(client_height.revision_height)) - .unwrap(); - - let packet = msg.packet.clone(); - - let packet_old = Packet { - sequence: 1.into(), - source_port: PortId::default(), - source_channel: ChannelId::default(), - destination_port: PortId::default(), - destination_channel: ChannelId::default(), - data: Vec::new(), - timeout_height: client_height, - timeout_timestamp: Timestamp::from_nanoseconds(1).unwrap(), - }; - - let msg_packet_old = - MsgRecvPacket::new(packet_old, msg.proofs.clone(), get_dummy_account_id()); - - let dest_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: 
msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel( - packet.destination_port.clone(), - packet.destination_channel, - dest_channel_end.clone(), - ) - .with_send_sequence( - packet.destination_port.clone(), - packet.destination_channel, - 1.into(), - ) - .with_height(host_height) - // This `with_recv_sequence` is required for ordered channels - .with_recv_sequence( - packet.destination_port.clone(), - packet.destination_channel, - packet.sequence, - ), - msg, - want_pass: true, - }, - Test { - name: "Packet timeout expired".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel(PortId::default(), ChannelId::default(), dest_channel_end) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()) - .with_height(host_height), - msg: msg_packet_old, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::context::ClientReader, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::recv_packet::process, + msgs::recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + test_utils::get_dummy_account_id, + timestamp::{Timestamp, ZERO_DURATION}, + }; + + #[test] + fn recv_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgRecvPacket, + want_pass: bool, + } + + let context = MockContext::default(); + + let host_height = context.latest_height().increment(); + + let client_height = host_height.increment(); + + let msg = + MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(client_height.revision_height)) + .unwrap(); + + let packet = msg.packet.clone(); + + let packet_old = Packet { + sequence: 1.into(), + source_port: PortId::default(), + source_channel: ChannelId::default(), + destination_port: PortId::default(), + destination_channel: ChannelId::default(), + data: Vec::new(), + timeout_height: client_height, + timeout_timestamp: Timestamp::from_nanoseconds(1).unwrap(), + }; + + let msg_packet_old = + MsgRecvPacket::new(packet_old, msg.proofs.clone(), get_dummy_account_id()); + + let dest_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + 
ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .clone() + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end.clone()) + .with_channel( + packet.destination_port.clone(), + packet.destination_channel, + dest_channel_end.clone(), + ) + .with_send_sequence( + packet.destination_port.clone(), + packet.destination_channel, + 1.into(), + ) + .with_height(host_height) + // This `with_recv_sequence` is required for ordered channels + .with_recv_sequence( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + ), + msg, + want_pass: true, + }, + Test { + name: "Packet timeout expired".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), dest_channel_end) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()) + .with_height(host_height), + msg: msg_packet_old, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "recv_packet: test passed but was supposed to fail for test: {}, \nparams \n msg={:?}\nctx:{:?}", test.name, @@ -283,15 +295,15 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. + assert!(!proto_output.events.is_empty()); // Some events must exist. 
- for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::ReceivePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::ReceivePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( !test.want_pass, "recv_packet: did not pass test: {}, \nparams \nmsg={:?}\nctx={:?}\nerror={:?}", test.name, @@ -299,8 +311,8 @@ mod tests { test.ctx.clone(), e, ); - } - } - } - } + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/send_packet.rs b/modules/src/core/ics04_channel/handler/send_packet.rs index 00e573aef2..63854e9820 100644 --- a/modules/src/core/ics04_channel/handler/send_packet.rs +++ b/modules/src/core/ics04_channel/handler/send_packet.rs @@ -1,214 +1,223 @@ -use crate::core::ics02_client::client_state::ClientState; -use crate::core::ics04_channel::channel::Counterparty; -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::commitment::PacketCommitment; -use crate::core::ics04_channel::events::SendPacket; -use crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error, packet::Packet}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Expiry; +use crate::{ + core::{ + ics02_client::{client_consensus::ConsensusState, client_state::ClientState}, + ics04_channel::{ + channel::{Counterparty, State}, + commitment::PacketCommitment, + error::Error, + events::SendPacket, + packet::{Packet, PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, + timestamp::Expiry, +}; #[derive(Clone, Debug)] pub struct 
SendPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub seq_number: Sequence, - pub commitment: PacketCommitment, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub seq_number: Sequence, + pub commitment: PacketCommitment, + pub packet: Packet, } -pub fn send_packet(ctx: &dyn ChannelReader, packet: Packet) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - if source_channel_end.state_matches(&State::Closed) { - return Err(Error::channel_closed(packet.source_channel)); - } - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; - - let client_id = connection_end.client_id().clone(); - - let client_state = ctx.client_state(&client_id)?; - - // prevent accidental sends with clients that cannot be updated - if client_state.is_frozen() { - return Err(Error::frozen_client(connection_end.client_id().clone())); - } - - let latest_height = client_state.latest_height(); - - if !packet.timeout_height.is_zero() && packet.timeout_height <= latest_height { - return Err(Error::low_packet_height( - latest_height, - packet.timeout_height, - )); - } - - let consensus_state = ctx.client_consensus_state(&client_id, latest_height)?; - let latest_timestamp = consensus_state.timestamp(); - let packet_timestamp = packet.timeout_timestamp; - if let Expiry::Expired = latest_timestamp.check_expiry(&packet_timestamp) { - return Err(Error::low_packet_timestamp()); - } - - let next_seq_send = - ctx.get_next_sequence_send(&(packet.source_port.clone(), packet.source_channel))?; 
- - if packet.sequence != next_seq_send { - return Err(Error::invalid_packet_sequence( - packet.sequence, - next_seq_send, - )); - } - - output.log("success: packet send "); - - let result = PacketResult::Send(SendPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - seq_number: next_seq_send.increment(), - commitment: ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ), - }); - - output.emit(IbcEvent::SendPacket(SendPacket { - height: ctx.host_height(), - packet, - })); - - Ok(output.with_result(result)) +pub fn send_packet( + ctx: &Ctx, + packet: Packet, +) -> HandlerResult { + let mut output = HandlerOutput::builder(); + + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + if source_channel_end.state_matches(&State::Closed) { + return Err(Error::channel_closed(packet.source_channel)) + } + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + let client_id = connection_end.client_id().clone(); + + let client_state = ctx + .client_state(&client_id) + .map_err(|e| Error::implementation_specific(e.to_string()))?; + + // prevent accidental sends with clients that cannot be updated + if client_state.is_frozen() { + return Err(Error::frozen_client(connection_end.client_id().clone())) + } + + let latest_height = client_state.latest_height(); + + if !packet.timeout_height.is_zero() && packet.timeout_height <= latest_height { + return Err(Error::low_packet_height(latest_height, packet.timeout_height)) + } + + let consensus_state = ctx + 
.consensus_state(&client_id, latest_height) + .map_err(|_| Error::error_invalid_consensus_state())?; + let latest_timestamp = consensus_state.timestamp(); + let packet_timestamp = packet.timeout_timestamp; + if let Expiry::Expired = latest_timestamp.check_expiry(&packet_timestamp) { + return Err(Error::low_packet_timestamp()) + } + + let next_seq_send = + ctx.get_next_sequence_send(&(packet.source_port.clone(), packet.source_channel))?; + + if packet.sequence != next_seq_send { + return Err(Error::invalid_packet_sequence(packet.sequence, next_seq_send)) + } + + output.log("success: packet send "); + + let result = PacketResult::Send(SendPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + seq_number: next_seq_send.increment(), + packet: packet.clone(), + commitment: ctx.packet_commitment( + packet.data.clone(), + packet.timeout_height, + packet.timeout_timestamp, + ), + }); + + output.emit(IbcEvent::SendPacket(SendPacket { height: ctx.host_height(), packet })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use core::ops::Add; - use core::time::Duration; - - use test_log::test; - - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::send_packet::send_packet; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::packet::Packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - 
use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::timestamp::Timestamp; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn send_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - packet: Packet, - want_pass: bool, - } - - let context = MockContext::default(); - - let timestamp = Timestamp::now().add(Duration::from_secs(10)); - //CD:TODO remove unwrap - let mut packet: Packet = get_dummy_raw_packet(1, timestamp.unwrap().nanoseconds()) - .try_into() - .unwrap(); - packet.sequence = 1.into(); - packet.data = vec![0]; - - let channel_end = ChannelEnd::new( - State::TryOpen, - Order::default(), - Counterparty::new(PortId::default(), Some(ChannelId::default())), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let mut packet_old: Packet = get_dummy_raw_packet(1, 1).try_into().unwrap(); - packet_old.sequence = 1.into(); - packet_old.data = vec![0]; - - let client_height = Height::new(0, Height::default().revision_height + 1); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - packet: packet.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), Height::default()) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel(PortId::default(), ChannelId::default(), channel_end.clone()) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), - packet, - want_pass: true, - }, - Test { - name: "Packet timeout".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - 
.with_connection(ConnectionId::default(), connection_end) - .with_channel(PortId::default(), ChannelId::default(), channel_end) - .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), - packet: packet_old, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = send_packet(&test.ctx, test.packet.clone()); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + use core::{ops::Add, time::Duration}; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::send_packet::send_packet, + packet::{test_utils::get_dummy_raw_packet, Packet}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::{Timestamp, ZERO_DURATION}, + }; + + #[test] + fn send_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + packet: Packet, + want_pass: bool, + } + + let context = MockContext::default(); + + let timestamp = Timestamp::now().add(Duration::from_secs(10)); + //CD:TODO remove unwrap + let mut packet: Packet = + get_dummy_raw_packet(1, timestamp.unwrap().nanoseconds()).try_into().unwrap(); + packet.sequence = 1.into(); + packet.data = vec![0]; + + let channel_end = ChannelEnd::new( + State::TryOpen, + Order::default(), + Counterparty::new(PortId::default(), Some(ChannelId::default())), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + 
Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let mut packet_old: Packet = get_dummy_raw_packet(1, 1).try_into().unwrap(); + packet_old.sequence = 1.into(); + packet_old.data = vec![0]; + + let client_height = Height::new(0, Height::default().revision_height + 1); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + packet: packet.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .clone() + .with_client(&ClientId::default(), Height::default()) + .with_connection(ConnectionId::default(), connection_end.clone()) + .with_channel(PortId::default(), ChannelId::default(), channel_end.clone()) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), + packet, + want_pass: true, + }, + Test { + name: "Packet timeout".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), channel_end) + .with_send_sequence(PortId::default(), ChannelId::default(), 1.into()), + packet: packet_old, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = send_packet(&test.ctx, test.packet.clone()); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "send_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -216,25 +225,25 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - // TODO: The object in the output is a PacketResult what can we check on it? 
- for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::SendPacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "send_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.packet.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + // TODO: The object in the output is a PacketResult what can we check on it? + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::SendPacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "send_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.packet.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/timeout.rs b/modules/src/core/ics04_channel/handler/timeout.rs index c6153aa3cb..bd91824a90 100644 --- a/modules/src/core/ics04_channel/handler/timeout.rs +++ b/modules/src/core/ics04_channel/handler/timeout.rs @@ -1,220 +1,221 @@ -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::events::TimeoutPacket; -use crate::core::ics04_channel::handler::verify::{ - verify_next_sequence_recv, verify_packet_receipt_absence, +use crate::{ + core::{ + ics02_client::client_consensus::ConsensusState, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + error::Error, + events::TimeoutPacket, + handler::verify::{verify_next_sequence_recv, verify_packet_receipt_absence}, + msgs::timeout::MsgTimeout, + packet::{PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use crate::core::ics04_channel::msgs::timeout::MsgTimeout; -use 
crate::core::ics04_channel::packet::{PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -use crate::timestamp::Expiry; +use core::fmt::Debug; #[derive(Clone, Debug)] pub struct TimeoutPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub channel: Option, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub channel: Option, } -pub fn process(ctx: &dyn ChannelReader, msg: &MsgTimeout) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let mut source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - if !source_channel_end.state_matches(&State::Open) { - return Err(Error::channel_closed(packet.source_channel)); - } - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; - - let client_id = connection_end.client_id().clone(); - - // check that timeout height or timeout timestamp has passed on the other end - let proof_height = msg.proofs.height(); - let packet_height = packet.timeout_height; - - if (!packet.timeout_height.is_zero()) && packet_height > proof_height { - return Err(Error::packet_timeout_height_not_reached( - packet.timeout_height, - proof_height, - )); - } - - let consensus_state = ctx.client_consensus_state(&client_id, proof_height)?; - - let proof_timestamp = consensus_state.timestamp(); - - let packet_timestamp = packet.timeout_timestamp; - if let 
Expiry::Expired = packet_timestamp.check_expiry(&proof_timestamp) { - return Err(Error::packet_timeout_timestamp_not_reached( - packet_timestamp, - proof_timestamp, - )); - } - - //verify packet commitment - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; - - let expected_commitment = ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); - if packet_commitment != expected_commitment { - return Err(Error::incorrect_packet_commitment(packet.sequence)); - } - - let result = if source_channel_end.order_matches(&Order::Ordered) { - if packet.sequence < msg.next_sequence_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - msg.next_sequence_recv, - )); - } - verify_next_sequence_recv( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - msg.next_sequence_recv, - &msg.proofs, - )?; - - source_channel_end.state = State::Closed; - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: Some(source_channel_end), - }) - } else { - verify_packet_receipt_absence( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: None, - }) - }; - - output.log("success: packet timeout "); - - output.emit(IbcEvent::TimeoutPacket(TimeoutPacket { - height: ctx.host_height(), - packet: packet.clone(), - })); - - Ok(output.with_result(result)) +pub fn process(ctx: &Ctx, msg: &MsgTimeout) -> HandlerResult +where + Ctx: ReaderContext, +{ + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let mut source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + if 
!source_channel_end.state_matches(&State::Open) { + return Err(Error::channel_closed(packet.source_channel)) + } + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + let client_id = connection_end.client_id().clone(); + + // check that timeout height or timeout timestamp has passed on the other end + let proof_height = msg.proofs.height(); + + let consensus_state = ctx + .consensus_state(&client_id, proof_height) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let proof_timestamp = consensus_state.timestamp(); + + if !packet.timed_out(&proof_timestamp, proof_height) { + return Err(Error::packet_timeout_not_reached( + packet.timeout_height, + proof_height, + packet.timeout_timestamp, + proof_timestamp, + )) + } + + //verify packet commitment + let packet_commitment = ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; + + let expected_commitment = + ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp); + if packet_commitment != expected_commitment { + return Err(Error::incorrect_packet_commitment(packet.sequence)) + } + + let result = if source_channel_end.order_matches(&Order::Ordered) { + if packet.sequence < msg.next_sequence_recv { + return Err(Error::invalid_packet_sequence(packet.sequence, msg.next_sequence_recv)) + } + verify_next_sequence_recv::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + msg.next_sequence_recv, + &msg.proofs, + )?; + + source_channel_end.state = State::Closed; + PacketResult::Timeout(TimeoutPacketResult { + port_id: 
packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: Some(source_channel_end), + }) + } else { + verify_packet_receipt_absence::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: None, + }) + }; + + output.log("success: packet timeout "); + + output.emit(IbcEvent::TimeoutPacket(TimeoutPacket { + height: ctx.host_height(), + packet: packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::timeout::process; - use crate::core::ics04_channel::msgs::timeout::test_util::get_dummy_raw_msg_timeout; - use crate::core::ics04_channel::msgs::timeout::MsgTimeout; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn timeout_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgTimeout, - want_pass: bool, - } - - let context = MockContext::default(); - - let height = Height::default().revision_height + 2; - let timeout_timestamp = 5; - - let client_height = Height::new(0, Height::default().revision_height + 
2); - - let msg = - MsgTimeout::try_from(get_dummy_raw_msg_timeout(height, timeout_timestamp)).unwrap(); - let packet = msg.packet.clone(); - - let mut msg_ok = msg.clone(); - msg_ok.packet.timeout_timestamp = Default::default(); - - let data = context.packet_commitment( - msg_ok.packet.data.clone(), - msg_ok.packet.timeout_height, - msg_ok.packet.timeout_timestamp, - ); - - let source_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let mut source_ordered_channel_end = source_channel_end.clone(); - source_ordered_channel_end.ordering = Order::Ordered; - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::timeout::process, + msgs::timeout::{test_util::get_dummy_raw_msg_timeout, MsgTimeout}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; + + #[test] + fn timeout_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgTimeout, + want_pass: bool, + } + + let context = MockContext::default(); + + let height = Height::default().revision_height + 2; + let timeout_timestamp = 5; + + let client_height = 
Height::new(0, Height::default().revision_height + 2); + + let msg = + MsgTimeout::try_from(get_dummy_raw_msg_timeout(height, timeout_timestamp)).unwrap(); + let packet = msg.packet.clone(); + + let mut msg_ok = msg.clone(); + msg_ok.packet.timeout_timestamp = Default::default(); + + let data = context.packet_commitment( + msg_ok.packet.data.clone(), + msg_ok.packet.timeout_height, + msg_ok.packet.timeout_timestamp, + ); + + let source_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let mut source_ordered_channel_end = source_channel_end.clone(); + source_ordered_channel_end.ordering = Order::Ordered; + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ Test { name: "Processing fails because no channel exists in the context".to_string(), ctx: context.clone(), @@ -293,12 +294,12 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res = process(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. - match res { - Ok(proto_output) => { - assert!( + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "TO_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -306,24 +307,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. 
- - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::TimeoutPacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. + + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::TimeoutPacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/timeout_on_close.rs b/modules/src/core/ics04_channel/handler/timeout_on_close.rs index 04a7550ec6..828114d2de 100644 --- a/modules/src/core/ics04_channel/handler/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/handler/timeout_on_close.rs @@ -1,258 +1,267 @@ -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::events::TimeoutOnClosePacket; -use crate::core::ics04_channel::handler::verify::verify_channel_proofs; -use crate::core::ics04_channel::handler::verify::{ - verify_next_sequence_recv, verify_packet_receipt_absence, +use crate::{ + core::{ + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + error::Error, + events::TimeoutOnClosePacket, + handler::{ + timeout::TimeoutPacketResult, + verify::{ + verify_channel_proofs, verify_next_sequence_recv, verify_packet_receipt_absence, + }, + }, + msgs::timeout_on_close::MsgTimeoutOnClose, + packet::PacketResult, + }, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; -use 
crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics04_channel::{ - context::ChannelReader, error::Error, handler::timeout::TimeoutPacketResult, -}; -use crate::events::IbcEvent; -use crate::handler::{HandlerOutput, HandlerResult}; -use crate::prelude::*; -pub fn process( - ctx: &dyn ChannelReader, - msg: &MsgTimeoutOnClose, +pub fn process( + ctx: &Ctx, + msg: &MsgTimeoutOnClose, ) -> HandlerResult { - let mut output = HandlerOutput::builder(); - - let packet = &msg.packet; - - let source_channel_end = - ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; - - let counterparty = Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ); - - if !source_channel_end.counterparty_matches(&counterparty) { - return Err(Error::invalid_packet_counterparty( - packet.destination_port.clone(), - packet.destination_channel, - )); - } - - let connection_end = ctx.connection_end(&source_channel_end.connection_hops()[0])?; - - //verify the packet was sent, check the store - let packet_commitment = ctx.get_packet_commitment(&( - packet.source_port.clone(), - packet.source_channel, - packet.sequence, - ))?; - - let expected_commitment = ctx.packet_commitment( - packet.data.clone(), - packet.timeout_height, - packet.timeout_timestamp, - ); - if packet_commitment != expected_commitment { - return Err(Error::incorrect_packet_commitment(packet.sequence)); - } - - let expected_counterparty = - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); - - let counterparty = connection_end.counterparty(); - let ccid = counterparty.connection_id().ok_or_else(|| { - Error::undefined_connection_counterparty(source_channel_end.connection_hops()[0].clone()) - })?; - - let expected_connection_hops = vec![ccid.clone()]; - - let expected_channel_end = ChannelEnd::new( - State::Closed, - *source_channel_end.ordering(), - 
expected_counterparty, - expected_connection_hops, - source_channel_end.version().clone(), - ); - - verify_channel_proofs( - ctx, - msg.proofs.height(), - &source_channel_end, - &connection_end, - &expected_channel_end, - &msg.proofs, - )?; - - let result = if source_channel_end.order_matches(&Order::Ordered) { - if packet.sequence < msg.next_sequence_recv { - return Err(Error::invalid_packet_sequence( - packet.sequence, - msg.next_sequence_recv, - )); - } - verify_next_sequence_recv( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - msg.next_sequence_recv, - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: Some(source_channel_end), - }) - } else { - verify_packet_receipt_absence( - ctx, - msg.proofs.height(), - &connection_end, - packet.clone(), - &msg.proofs, - )?; - - PacketResult::Timeout(TimeoutPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - channel: None, - }) - }; - - output.log("success: packet timeout "); - - output.emit(IbcEvent::TimeoutOnClosePacket(TimeoutOnClosePacket { - height: ctx.host_height(), - packet: packet.clone(), - })); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + let packet = &msg.packet; + + let source_channel_end = + ctx.channel_end(&(packet.source_port.clone(), packet.source_channel))?; + + let counterparty = + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)); + + if !source_channel_end.counterparty_matches(&counterparty) { + return Err(Error::invalid_packet_counterparty( + packet.destination_port.clone(), + packet.destination_channel, + )) + } + + let connection_end = ctx + .connection_end(&source_channel_end.connection_hops()[0]) + .map_err(Error::ics03_connection)?; + + //verify the packet was sent, check the store + let packet_commitment = 
ctx.get_packet_commitment(&( + packet.source_port.clone(), + packet.source_channel, + packet.sequence, + ))?; + + let expected_commitment = + ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp); + if packet_commitment != expected_commitment { + return Err(Error::incorrect_packet_commitment(packet.sequence)) + } + + let expected_counterparty = + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)); + + let counterparty = connection_end.counterparty(); + let ccid = counterparty.connection_id().ok_or_else(|| { + Error::undefined_connection_counterparty(source_channel_end.connection_hops()[0].clone()) + })?; + + let expected_connection_hops = vec![ccid.clone()]; + + let expected_channel_end = ChannelEnd::new( + State::Closed, + *source_channel_end.ordering(), + expected_counterparty, + expected_connection_hops, + source_channel_end.version().clone(), + ); + + verify_channel_proofs::( + ctx, + msg.proofs.height(), + &source_channel_end, + &connection_end, + &expected_channel_end, + msg.proofs + .other_proof() + .as_ref() + .ok_or_else(|| Error::missing_channel_proof())?, + )?; + + let result = if source_channel_end.order_matches(&Order::Ordered) { + if packet.sequence < msg.next_sequence_recv { + return Err(Error::invalid_packet_sequence(packet.sequence, msg.next_sequence_recv)) + } + verify_next_sequence_recv::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + msg.next_sequence_recv, + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: Some(source_channel_end), + }) + } else { + verify_packet_receipt_absence::( + ctx, + msg.proofs.height(), + &connection_end, + packet.clone(), + &msg.proofs, + )?; + + PacketResult::Timeout(TimeoutPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + channel: None, + }) 
+ }; + + output.log("success: packet timeout "); + + output.emit(IbcEvent::TimeoutOnClosePacket(TimeoutOnClosePacket { + height: ctx.host_height(), + packet: packet.clone(), + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use test_log::test; - - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::context::ChannelReader; - use crate::core::ics04_channel::handler::timeout_on_close::process; - use crate::core::ics04_channel::msgs::timeout_on_close::test_util::get_dummy_raw_msg_timeout_on_close; - use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::events::IbcEvent; - use crate::mock::context::MockContext; - use crate::prelude::*; - use crate::timestamp::ZERO_DURATION; - - #[test] - fn timeout_on_close_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - msg: MsgTimeoutOnClose, - want_pass: bool, - } - - let context = MockContext::default(); - - let height = Height::default().revision_height + 2; - let timeout_timestamp = 5; - - let client_height = Height::new(0, Height::default().revision_height + 2); - - let msg = MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close( - height, - timeout_timestamp, - )) - .unwrap(); - let packet = msg.packet.clone(); - - let data = context.packet_commitment( - msg.packet.data.clone(), - msg.packet.timeout_height, - msg.packet.timeout_timestamp, - ); - - let source_channel_end = ChannelEnd::new( - State::Open, - 
Order::Ordered, - Counterparty::new( - packet.destination_port.clone(), - Some(packet.destination_channel), - ), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Processing fails no packet commitment is found".to_string(), - ctx: context - .clone() - .with_channel( - PortId::default(), - ChannelId::default(), - source_channel_end.clone(), - ) - .with_connection(ConnectionId::default(), connection_end.clone()), - msg: msg.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end) - .with_channel( - packet.source_port.clone(), - packet.source_channel, - source_channel_end, - ) - .with_packet_commitment( - msg.packet.source_port.clone(), - msg.packet.source_channel, - msg.packet.sequence, - data, - ), - msg, - want_pass: true, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process(&test.ctx, &test.msg); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + context::ChannelReader, + handler::timeout_on_close::process, + msgs::timeout_on_close::{ + test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose, + }, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + prelude::*, + timestamp::ZERO_DURATION, + }; + + #[test] + fn timeout_on_close_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + msg: MsgTimeoutOnClose, + want_pass: bool, + } + + let context = MockContext::default(); + + let height = Height::default().revision_height + 2; + let timeout_timestamp = 5; + + let client_height = Height::new(0, Height::default().revision_height + 2); + + let msg = MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close( + height, + timeout_timestamp, + )) + .unwrap(); + let packet = msg.packet.clone(); + + let data = context.packet_commitment( + msg.packet.data.clone(), + msg.packet.timeout_height, + msg.packet.timeout_timestamp, + ); + + let source_channel_end = ChannelEnd::new( + State::Open, + Order::Ordered, + Counterparty::new(packet.destination_port.clone(), Some(packet.destination_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists 
in the context".to_string(), + ctx: context.clone(), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Processing fails no packet commitment is found".to_string(), + ctx: context + .clone() + .with_channel( + PortId::default(), + ChannelId::default(), + source_channel_end.clone(), + ) + .with_connection(ConnectionId::default(), connection_end.clone()), + msg: msg.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), connection_end) + .with_channel( + packet.source_port.clone(), + packet.source_channel, + source_channel_end, + ) + .with_packet_commitment( + msg.packet.source_port.clone(), + msg.packet.source_channel, + msg.packet.sequence, + data, + ), + msg, + want_pass: true, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, &test.msg); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "TO_on_close_packet: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -260,23 +269,23 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::TimeoutOnClosePacket(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.msg.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. 
+ for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::TimeoutOnClosePacket(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "timeout_packet: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.msg.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/handler/verify.rs b/modules/src/core/ics04_channel/handler/verify.rs index 96735f4938..0284f609c6 100644 --- a/modules/src/core/ics04_channel/handler/verify.rs +++ b/modules/src/core/ics04_channel/handler/verify.rs @@ -1,214 +1,243 @@ -use crate::core::ics02_client::client_consensus::ConsensusState; -use crate::core::ics02_client::client_state::ClientState; -use crate::core::ics02_client::{client_def::AnyClient, client_def::ClientDef}; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::Height; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, client_def::ClientDef, client_state::ClientState, + }, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + error::Error, + msgs::acknowledgement::Acknowledgement, + packet::{Packet, Sequence}, + }, + ics23_commitment::commitment::CommitmentProofBytes, + ics26_routing::context::ReaderContext, + }, + prelude::*, + proofs::Proofs, + Height, +}; /// Entry point for verifying all proofs bundled in any ICS4 message for channel protocols. 
-pub fn verify_channel_proofs( - ctx: &dyn ChannelReader, - height: Height, - channel_end: &ChannelEnd, - connection_end: &ConnectionEnd, - expected_chan: &ChannelEnd, - proofs: &Proofs, -) -> Result<(), Error> { - // This is the client which will perform proof verification. - let client_id = connection_end.client_id().clone(); - - let client_state = ctx.client_state(&client_id)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id)); - } - - let consensus_state = ctx.client_consensus_state(&client_id, proofs.height())?; - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - // Verify the proof for the channel state against the expected channel end. - // A counterparty channel id of None in not possible, and is checked by validate_basic in msg. - client_def - .verify_channel_state( - &client_state, - height, - connection_end.counterparty().prefix(), - proofs.object_proof(), - consensus_state.root(), - channel_end.counterparty().port_id(), - channel_end.counterparty().channel_id().unwrap(), - expected_chan, - ) - .map_err(Error::verify_channel_failed) +pub fn verify_channel_proofs( + ctx: &Ctx, + height: Height, + channel_end: &ChannelEnd, + connection_end: &ConnectionEnd, + expected_chan: &ChannelEnd, + proof: &CommitmentProofBytes, +) -> Result<(), Error> +where + Ctx: ReaderContext, +{ + // This is the client which will perform proof verification. + let client_id = connection_end.client_id().clone(); + + let client_state = ctx.client_state(&client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id)) + } + + let consensus_state = ctx + .consensus_state(&client_id, height) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + // Verify the proof for the channel state against the expected channel end. 
+ // A counterparty channel id of None is not possible, and is checked by validate_basic in msg.
+ client_def
+ .verify_channel_state(
+ ctx,
+ &client_id,
+ &client_state,
+ height,
+ connection_end.counterparty().prefix(),
+ &proof,
+ consensus_state.root(),
+ channel_end.counterparty().port_id(),
+ channel_end.counterparty().channel_id().unwrap(),
+ expected_chan,
+ )
+ .map_err(Error::verify_channel_failed)
}

/// Entry point for verifying all proofs bundled in a ICS4 packet recv. message.
-pub fn verify_packet_recv_proofs(
- ctx: &dyn ChannelReader,
- height: Height,
- packet: &Packet,
- connection_end: &ConnectionEnd,
- proofs: &Proofs,
+pub fn verify_packet_recv_proofs(
+ ctx: &Ctx,
+ height: Height,
+ packet: &Packet,
+ connection_end: &ConnectionEnd,
+ proofs: &Proofs,
) -> Result<(), Error> {
- let client_id = connection_end.client_id();
- let client_state = ctx.client_state(client_id)?;
-
- // The client must not be frozen.
- if client_state.is_frozen() {
- return Err(Error::frozen_client(client_id.clone()));
- }
-
- let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?;
-
- let client_def = AnyClient::from_client_type(client_state.client_type());
-
- let commitment = ctx.packet_commitment(
- packet.data.clone(),
- packet.timeout_height,
- packet.timeout_timestamp,
- );
-
- // Verify the proof for the packet against the chain store.
- client_def
- .verify_packet_data(
- ctx,
- &client_state,
- height,
- connection_end,
- proofs.object_proof(),
- consensus_state.root(),
- &packet.source_port,
- &packet.source_channel,
- packet.sequence,
- commitment,
- )
- .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?;
-
- Ok(())
+ let client_id = connection_end.client_id();
+ let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?;
+
+ // The client must not be frozen.
+ if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + let commitment = + ctx.packet_commitment(packet.data.clone(), packet.timeout_height, packet.timeout_timestamp); + + // Verify the proof for the packet against the chain store. + client_def + .verify_packet_data( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.source_port, + &packet.source_channel, + packet.sequence, + commitment, + ) + .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; + + Ok(()) } /// Entry point for verifying all proofs bundled in an ICS4 packet ack message. -pub fn verify_packet_acknowledgement_proofs( - ctx: &dyn ChannelReader, - height: Height, - packet: &Packet, - acknowledgement: Acknowledgement, - connection_end: &ConnectionEnd, - proofs: &Proofs, +pub fn verify_packet_acknowledgement_proofs( + ctx: &Ctx, + height: Height, + packet: &Packet, + acknowledgement: Acknowledgement, + connection_end: &ConnectionEnd, + proofs: &Proofs, ) -> Result<(), Error> { - let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id.clone())); - } - - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - - let ack_commitment = ctx.ack_commitment(acknowledgement); - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - // Verify the proof for the packet against the chain store. 
- client_def - .verify_packet_acknowledgement( - ctx, - &client_state, - height, - connection_end, - proofs.object_proof(), - consensus_state.root(), - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - ack_commitment, - ) - .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; - - Ok(()) + let client_id = connection_end.client_id(); + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let ack_commitment = ctx.ack_commitment(acknowledgement); + + let client_def = client_state.client_def(); + + // Verify the proof for the packet against the chain store. + client_def + .verify_packet_acknowledgement( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ack_commitment, + ) + .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; + + Ok(()) } /// Entry point for verifying all timeout proofs. -pub fn verify_next_sequence_recv( - ctx: &dyn ChannelReader, - height: Height, - connection_end: &ConnectionEnd, - packet: Packet, - seq: Sequence, - proofs: &Proofs, -) -> Result<(), Error> { - let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id.clone())); - } - - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - // Verify the proof for the packet against the chain store. 
- client_def - .verify_next_sequence_recv( - ctx, - &client_state, - height, - connection_end, - proofs.object_proof(), - consensus_state.root(), - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - ) - .map_err(|e| Error::packet_verification_failed(seq, e))?; - - Ok(()) +pub fn verify_next_sequence_recv( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + packet: Packet, + seq: Sequence, + proofs: &Proofs, +) -> Result<(), Error> +where + Ctx: ReaderContext, +{ + let client_id = connection_end.client_id(); + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + // Verify the proof for the packet against the chain store. + client_def + .verify_next_sequence_recv( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ) + .map_err(|e| Error::packet_verification_failed(seq, e))?; + + Ok(()) } -pub fn verify_packet_receipt_absence( - ctx: &dyn ChannelReader, - height: Height, - connection_end: &ConnectionEnd, - packet: Packet, - proofs: &Proofs, -) -> Result<(), Error> { - let client_id = connection_end.client_id(); - let client_state = ctx.client_state(client_id)?; - - // The client must not be frozen. - if client_state.is_frozen() { - return Err(Error::frozen_client(client_id.clone())); - } - - let consensus_state = ctx.client_consensus_state(client_id, proofs.height())?; - - let client_def = AnyClient::from_client_type(client_state.client_type()); - - // Verify the proof for the packet against the chain store. 
- client_def - .verify_packet_receipt_absence( - ctx, - &client_state, - height, - connection_end, - proofs.object_proof(), - consensus_state.root(), - &packet.destination_port, - &packet.destination_channel, - packet.sequence, - ) - .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; - - Ok(()) +pub fn verify_packet_receipt_absence( + ctx: &Ctx, + height: Height, + connection_end: &ConnectionEnd, + packet: Packet, + proofs: &Proofs, +) -> Result<(), Error> +where + Ctx: ReaderContext, +{ + let client_id = connection_end.client_id(); + let client_state = ctx.client_state(client_id).map_err(Error::ics02_client)?; + + // The client must not be frozen. + if client_state.is_frozen() { + return Err(Error::frozen_client(client_id.clone())) + } + + let consensus_state = ctx + .consensus_state(client_id, proofs.height()) + .map_err(|_| Error::error_invalid_consensus_state())?; + + let client_def = client_state.client_def(); + + // Verify the proof for the packet against the chain store. 
+ client_def + .verify_packet_receipt_absence( + ctx, + client_id, + &client_state, + height, + connection_end, + proofs.object_proof(), + consensus_state.root(), + &packet.destination_port, + &packet.destination_channel, + packet.sequence, + ) + .map_err(|e| Error::packet_verification_failed(packet.sequence, e))?; + + Ok(()) } diff --git a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs index 3bf30c57c0..1d5d838c11 100644 --- a/modules/src/core/ics04_channel/handler/write_acknowledgement.rs +++ b/modules/src/core/ics04_channel/handler/write_acknowledgement.rs @@ -1,181 +1,190 @@ -use crate::core::ics04_channel::channel::State; -use crate::core::ics04_channel::commitment::AcknowledgementCommitment; -use crate::core::ics04_channel::events::WriteAcknowledgement; -use crate::core::ics04_channel::packet::{Packet, PacketResult, Sequence}; -use crate::core::ics04_channel::{context::ChannelReader, error::Error}; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; use crate::{ - events::IbcEvent, - handler::{HandlerOutput, HandlerResult}, + core::{ + ics04_channel::{ + channel::State, + commitment::AcknowledgementCommitment, + error::Error, + events::WriteAcknowledgement, + packet::{Packet, PacketResult, Sequence}, + }, + ics24_host::identifier::{ChannelId, PortId}, + ics26_routing::context::ReaderContext, + }, + events::IbcEvent, + handler::{HandlerOutput, HandlerResult}, + prelude::*, }; #[derive(Clone, Debug)] pub struct WriteAckPacketResult { - pub port_id: PortId, - pub channel_id: ChannelId, - pub seq: Sequence, - pub ack_commitment: AcknowledgementCommitment, + pub port_id: PortId, + pub channel_id: ChannelId, + pub seq: Sequence, + pub ack_commitment: AcknowledgementCommitment, } -pub fn process( - ctx: &dyn ChannelReader, - packet: Packet, - ack: Vec, +pub fn process( + ctx: &Ctx, + packet: Packet, + ack: Vec, ) -> HandlerResult { - let mut output = 
HandlerOutput::builder(); - - let dest_channel_end = - ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; - - if !dest_channel_end.state_matches(&State::Open) { - return Err(Error::invalid_channel_state( - packet.source_channel, - dest_channel_end.state, - )); - } - - // NOTE: IBC app modules might have written the acknowledgement synchronously on - // the OnRecvPacket callback so we need to check if the acknowledgement is already - // set on the store and return an error if so. - match ctx.get_packet_acknowledgement(&( - packet.destination_port.clone(), - packet.destination_channel, - packet.sequence, - )) { - Ok(_) => return Err(Error::acknowledgement_exists(packet.sequence)), - Err(e) - if e.detail() == Error::packet_acknowledgement_not_found(packet.sequence).detail() => {} - Err(e) => return Err(e), - } - - if ack.is_empty() { - return Err(Error::invalid_acknowledgement()); - } - - let result = PacketResult::WriteAck(WriteAckPacketResult { - port_id: packet.source_port.clone(), - channel_id: packet.source_channel, - seq: packet.sequence, - ack_commitment: ctx.ack_commitment(ack.clone().into()), - }); - - output.log("success: packet write acknowledgement"); - - output.emit(IbcEvent::WriteAcknowledgement(WriteAcknowledgement { - height: ctx.host_height(), - packet, - ack, - })); - - Ok(output.with_result(result)) + let mut output = HandlerOutput::builder(); + + let dest_channel_end = + ctx.channel_end(&(packet.destination_port.clone(), packet.destination_channel))?; + + if !dest_channel_end.state_matches(&State::Open) { + return Err(Error::invalid_channel_state(packet.source_channel, dest_channel_end.state)) + } + + // NOTE: IBC app modules might have written the acknowledgement synchronously on + // the OnRecvPacket callback so we need to check if the acknowledgement is already + // set on the store and return an error if so. 
+ match ctx.get_packet_acknowledgement(&( + packet.destination_port.clone(), + packet.destination_channel, + packet.sequence, + )) { + Ok(_) => return Err(Error::acknowledgement_exists(packet.sequence)), + Err(e) + if e.detail() == Error::packet_acknowledgement_not_found(packet.sequence).detail() => {}, + Err(e) => return Err(e), + } + + if ack.is_empty() { + return Err(Error::invalid_acknowledgement()) + } + + let result = PacketResult::WriteAck(WriteAckPacketResult { + port_id: packet.source_port.clone(), + channel_id: packet.source_channel, + seq: packet.sequence, + ack_commitment: ctx.ack_commitment(ack.clone().into()), + }); + + output.log("success: packet write acknowledgement"); + + output.emit(IbcEvent::WriteAcknowledgement(WriteAcknowledgement { + height: ctx.host_height(), + packet, + ack, + })); + + Ok(output.with_result(result)) } #[cfg(test)] mod tests { - use crate::core::ics04_channel::context::ChannelReader; - use crate::prelude::*; - - use test_log::test; - - use crate::core::ics02_client::height::Height; - use crate::core::ics03_connection::connection::ConnectionEnd; - use crate::core::ics03_connection::connection::Counterparty as ConnectionCounterparty; - use crate::core::ics03_connection::connection::State as ConnectionState; - use crate::core::ics03_connection::version::get_compatible_versions; - use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order, State}; - use crate::core::ics04_channel::handler::write_acknowledgement::process; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; - use crate::mock::context::MockContext; - use crate::timestamp::ZERO_DURATION; - use crate::{core::ics04_channel::packet::Packet, events::IbcEvent}; - - #[test] - fn write_ack_packet_processing() { - struct Test { - name: String, - ctx: MockContext, - packet: Packet, - ack: Vec, - want_pass: 
bool, - } - - let context = MockContext::default(); - - let client_height = Height::new(0, 1); - - let mut packet: Packet = get_dummy_raw_packet(1, 6).try_into().unwrap(); - packet.sequence = 1.into(); - packet.data = vec![0]; - - let ack = vec![0]; - let ack_null = Vec::new(); - - let dest_channel_end = ChannelEnd::new( - State::Open, - Order::default(), - Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), - vec![ConnectionId::default()], - Version::ics20(), - ); - - let connection_end = ConnectionEnd::new( - ConnectionState::Open, - ClientId::default(), - ConnectionCounterparty::new( - ClientId::default(), - Some(ConnectionId::default()), - Default::default(), - ), - get_compatible_versions(), - ZERO_DURATION, - ); - - let tests: Vec = vec![ - Test { - name: "Processing fails because no channel exists in the context".to_string(), - ctx: context.clone(), - packet: packet.clone(), - ack: ack.clone(), - want_pass: false, - }, - Test { - name: "Good parameters".to_string(), - ctx: context - .clone() - .with_client(&ClientId::default(), client_height) - .with_connection(ConnectionId::default(), connection_end.clone()) - .with_channel( - packet.destination_port.clone(), - packet.destination_channel, - dest_channel_end.clone(), - ), - packet: packet.clone(), - ack, - want_pass: true, - }, - Test { - name: "Zero ack".to_string(), - ctx: context - .with_client(&ClientId::default(), Height::default()) - .with_connection(ConnectionId::default(), connection_end) - .with_channel(PortId::default(), ChannelId::default(), dest_channel_end), - packet, - ack: ack_null, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = process(&test.ctx, test.packet.clone(), test.ack); - // Additionally check the events and the output objects in the result. 
- match res { - Ok(proto_output) => { - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + core::{ + ics02_client::{context::ClientReader, height::Height}, + ics03_connection::{ + connection::{ + ConnectionEnd, Counterparty as ConnectionCounterparty, State as ConnectionState, + }, + version::get_compatible_versions, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order, State}, + handler::write_acknowledgement::process, + packet::{test_utils::get_dummy_raw_packet, Packet}, + Version, + }, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + }, + events::IbcEvent, + mock::context::{MockClientTypes, MockContext}, + timestamp::ZERO_DURATION, + }; + + #[test] + fn write_ack_packet_processing() { + struct Test { + name: String, + ctx: MockContext, + packet: Packet, + ack: Vec, + want_pass: bool, + } + + let context = MockContext::default(); + + let client_height = Height::new(0, 1); + + let mut packet: Packet = get_dummy_raw_packet(1, 6).try_into().unwrap(); + packet.sequence = 1.into(); + packet.data = vec![0]; + + let ack = vec![0]; + let ack_null = Vec::new(); + + let dest_channel_end = ChannelEnd::new( + State::Open, + Order::default(), + Counterparty::new(packet.source_port.clone(), Some(packet.source_channel)), + vec![ConnectionId::default()], + Version::ics20(), + ); + + let connection_end = ConnectionEnd::new( + ConnectionState::Open, + ClientId::default(), + ConnectionCounterparty::new( + ClientId::default(), + Some(ConnectionId::default()), + Default::default(), + ), + get_compatible_versions(), + ZERO_DURATION, + ); + + let tests: Vec = vec![ + Test { + name: "Processing fails because no channel exists in the context".to_string(), + ctx: context.clone(), + packet: packet.clone(), + ack: ack.clone(), + want_pass: false, + }, + Test { + name: "Good parameters".to_string(), + ctx: context + .clone() + .with_client(&ClientId::default(), client_height) + .with_connection(ConnectionId::default(), 
connection_end.clone()) + .with_channel( + packet.destination_port.clone(), + packet.destination_channel, + dest_channel_end.clone(), + ), + packet: packet.clone(), + ack, + want_pass: true, + }, + Test { + name: "Zero ack".to_string(), + ctx: context + .with_client(&ClientId::default(), Height::default()) + .with_connection(ConnectionId::default(), connection_end) + .with_channel(PortId::default(), ChannelId::default(), dest_channel_end), + packet, + ack: ack_null, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = process(&test.ctx, test.packet.clone(), test.ack); + // Additionally check the events and the output objects in the result. + match res { + Ok(proto_output) => { + assert!( test.want_pass, "write_ack: test passed but was supposed to fail for test: {}, \nparams {:?} {:?}", test.name, @@ -183,24 +192,24 @@ mod tests { test.ctx.clone() ); - assert!(!proto_output.events.is_empty()); // Some events must exist. - - for e in proto_output.events.iter() { - assert!(matches!(e, &IbcEvent::WriteAcknowledgement(_))); - assert_eq!(e.height(), test.ctx.host_height()); - } - } - Err(e) => { - assert!( - !test.want_pass, - "write_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", - test.name, - test.packet.clone(), - test.ctx.clone(), - e, - ); - } - } - } - } + assert!(!proto_output.events.is_empty()); // Some events must exist. 
+ + for e in proto_output.events.iter() { + assert!(matches!(e, &IbcEvent::WriteAcknowledgement(_))); + assert_eq!(e.height(), test.ctx.host_height()); + } + }, + Err(e) => { + assert!( + !test.want_pass, + "write_ack: did not pass test: {}, \nparams {:?} {:?} error: {:?}", + test.name, + test.packet.clone(), + test.ctx.clone(), + e, + ); + }, + } + } + } } diff --git a/modules/src/core/ics04_channel/msgs.rs b/modules/src/core/ics04_channel/msgs.rs index 0437b9cd16..e9b57ac0b5 100644 --- a/modules/src/core/ics04_channel/msgs.rs +++ b/modules/src/core/ics04_channel/msgs.rs @@ -1,18 +1,19 @@ //! Message definitions for all ICS4 domain types: channel open & close handshake datagrams, as well //! as packets. -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; -use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; -use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; -use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; -use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; -use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; -use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; -use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; -use crate::core::ics04_channel::msgs::timeout::MsgTimeout; -use crate::core::ics04_channel::msgs::timeout_on_close::MsgTimeoutOnClose; -use crate::core::ics26_routing::context::{Ics26Context, ModuleId}; +use crate::core::{ + ics04_channel::{ + error::Error, + msgs::{ + acknowledgement::MsgAcknowledgement, chan_close_confirm::MsgChannelCloseConfirm, + chan_close_init::MsgChannelCloseInit, chan_open_ack::MsgChannelOpenAck, + chan_open_confirm::MsgChannelOpenConfirm, chan_open_init::MsgChannelOpenInit, + chan_open_try::MsgChannelOpenTry, recv_packet::MsgRecvPacket, timeout::MsgTimeout, + timeout_on_close::MsgTimeoutOnClose, + 
}, + }, + ics26_routing::context::{Ics26Context, ModuleId}, +}; // Opening handshake messages. pub mod chan_open_ack; @@ -33,44 +34,38 @@ pub mod timeout_on_close; /// Enumeration of all possible messages that the ICS4 protocol processes. #[derive(Clone, Debug, PartialEq)] pub enum ChannelMsg { - ChannelOpenInit(MsgChannelOpenInit), - ChannelOpenTry(MsgChannelOpenTry), - ChannelOpenAck(MsgChannelOpenAck), - ChannelOpenConfirm(MsgChannelOpenConfirm), - ChannelCloseInit(MsgChannelCloseInit), - ChannelCloseConfirm(MsgChannelCloseConfirm), + ChannelOpenInit(MsgChannelOpenInit), + ChannelOpenTry(MsgChannelOpenTry), + ChannelOpenAck(MsgChannelOpenAck), + ChannelOpenConfirm(MsgChannelOpenConfirm), + ChannelCloseInit(MsgChannelCloseInit), + ChannelCloseConfirm(MsgChannelCloseConfirm), } impl ChannelMsg { - pub(super) fn lookup_module(&self, ctx: &impl Ics26Context) -> Result { - let module_id = match self { - ChannelMsg::ChannelOpenInit(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenTry(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenAck(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelOpenConfirm(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelCloseInit(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - ChannelMsg::ChannelCloseConfirm(msg) => ctx - .lookup_module_by_port(&msg.port_id) - .map_err(Error::ics05_port)?, - }; - Ok(module_id) - } + pub(super) fn lookup_module(&self, ctx: &impl Ics26Context) -> Result { + let module_id = match self { + ChannelMsg::ChannelOpenInit(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenTry(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenAck(msg) => + 
ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelOpenConfirm(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelCloseInit(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + ChannelMsg::ChannelCloseConfirm(msg) => + ctx.lookup_module_by_port(&msg.port_id).map_err(Error::ics05_port)?, + }; + Ok(module_id) + } } #[derive(Clone, Debug, PartialEq)] pub enum PacketMsg { - RecvPacket(MsgRecvPacket), - AckPacket(MsgAcknowledgement), - ToPacket(MsgTimeout), - ToClosePacket(MsgTimeoutOnClose), + RecvPacket(MsgRecvPacket), + AckPacket(MsgAcknowledgement), + ToPacket(MsgTimeout), + ToClosePacket(MsgTimeoutOnClose), } diff --git a/modules/src/core/ics04_channel/msgs/acknowledgement.rs b/modules/src/core/ics04_channel/msgs/acknowledgement.rs index 95bf93279f..4d3ece1fc5 100644 --- a/modules/src/core/ics04_channel/msgs/acknowledgement.rs +++ b/modules/src/core/ics04_channel/msgs/acknowledgement.rs @@ -4,11 +4,12 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{error::Error, packet::Packet}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgAcknowledgement"; @@ -17,212 +18,193 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgAcknowledgement"; pub struct Acknowledgement(Vec); impl Acknowledgement { - pub fn into_bytes(self) -> Vec { - self.0 - } + pub fn into_bytes(self) -> Vec { + self.0 + } - pub fn from_bytes(bytes: Vec) -> Self { - bytes.into() - } + pub fn from_bytes(bytes: Vec) -> Self { + bytes.into() + } } impl From> for Acknowledgement { - fn from(bytes: Vec) -> Self { - Self(bytes) - } + fn from(bytes: Vec) -> 
Self { + Self(bytes) + } } impl AsRef<[u8]> for Acknowledgement { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } } /// /// Message definition for packet acknowledgements. -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgAcknowledgement { - pub packet: Packet, - pub acknowledgement: Acknowledgement, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub acknowledgement: Acknowledgement, + pub proofs: Proofs, + pub signer: Signer, } impl MsgAcknowledgement { - pub fn new( - packet: Packet, - acknowledgement: Acknowledgement, - proofs: Proofs, - signer: Signer, - ) -> MsgAcknowledgement { - Self { - packet, - acknowledgement, - proofs, - signer, - } - } - - pub fn acknowledgement(&self) -> &Acknowledgement { - &self.acknowledgement - } - - pub fn proofs(&self) -> &Proofs { - &self.proofs - } + pub fn new( + packet: Packet, + acknowledgement: Acknowledgement, + proofs: Proofs, + signer: Signer, + ) -> MsgAcknowledgement { + Self { packet, acknowledgement, proofs, signer } + } + + pub fn acknowledgement(&self) -> &Acknowledgement { + &self.acknowledgement + } + + pub fn proofs(&self) -> &Proofs { + &self.proofs + } } impl Msg for MsgAcknowledgement { - type ValidationError = Error; - type Raw = RawMsgAcknowledgement; + type ValidationError = Error; + type Raw = RawMsgAcknowledgement; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgAcknowledgement {} impl TryFrom for MsgAcknowledgement { - type Error = Error; - - fn try_from(raw_msg: RawMsgAcknowledgement) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_acked - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgAcknowledgement { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? - .try_into()?, - acknowledgement: raw_msg.acknowledgement.into(), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgAcknowledgement) -> Result { + let proofs = Proofs::new( + raw_msg.proof_acked.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgAcknowledgement { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + acknowledgement: raw_msg.acknowledgement.into(), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgAcknowledgement { - fn from(domain_msg: MsgAcknowledgement) -> Self { - RawMsgAcknowledgement { - packet: Some(domain_msg.packet.into()), - acknowledgement: domain_msg.acknowledgement.into_bytes(), - signer: domain_msg.signer.to_string(), - proof_height: Some(domain_msg.proofs.height().into()), - proof_acked: domain_msg.proofs.object_proof().clone().into(), - } - } + fn from(domain_msg: MsgAcknowledgement) -> Self { + RawMsgAcknowledgement { + packet: Some(domain_msg.packet.into()), + acknowledgement: domain_msg.acknowledgement.into_bytes(), + signer: domain_msg.signer.to_string(), + proof_height: Some(domain_msg.proofs.height().into()), + proof_acked: domain_msg.proofs.object_proof().clone().into(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgAcknowledgement`, for testing only! 
- /// The `height` parametrizes both the proof height as well as the timeout height. - pub fn get_dummy_raw_msg_acknowledgement(height: u64) -> RawMsgAcknowledgement { - RawMsgAcknowledgement { - packet: Some(get_dummy_raw_packet(height, 1)), - acknowledgement: get_dummy_proof(), - proof_acked: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgAcknowledgement as RawMsgAcknowledgement, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgAcknowledgement`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. + pub fn get_dummy_raw_msg_acknowledgement(height: u64) -> RawMsgAcknowledgement { + RawMsgAcknowledgement { + packet: Some(get_dummy_raw_packet(height, 1)), + acknowledgement: get_dummy_proof(), + proof_acked: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::acknowledgement::test_util::get_dummy_raw_msg_acknowledgement; - use crate::core::ics04_channel::msgs::acknowledgement::MsgAcknowledgement; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_acknowledgment_try_from_raw() { - struct Test { - name: String, - raw: RawMsgAcknowledgement, - want_pass: bool, - } - - let height = 50; - let default_raw_msg = get_dummy_raw_msg_acknowledgement(height); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: 
default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing packet".to_string(), - raw: RawMsgAcknowledgement { - packet: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgAcknowledgement { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgAcknowledgement { - signer: get_dummy_bech32_account(), - ..default_raw_msg.clone() - }, - want_pass: true, - }, - Test { - name: "Empty proof acked".to_string(), - raw: RawMsgAcknowledgement { - proof_acked: Vec::new(), - ..default_raw_msg - }, - want_pass: false, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgAcknowledgement as RawMsgAcknowledgement; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::acknowledgement::{ + test_util::get_dummy_raw_msg_acknowledgement, MsgAcknowledgement, + }, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_acknowledgment_try_from_raw() { + struct Test { + name: String, + raw: RawMsgAcknowledgement, + want_pass: bool, + } + + let height = 50; + let default_raw_msg = get_dummy_raw_msg_acknowledgement(height); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing packet".to_string(), + raw: RawMsgAcknowledgement { packet: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof height".to_string(), + raw: RawMsgAcknowledgement { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgAcknowledgement { + signer: get_dummy_bech32_account(), + ..default_raw_msg.clone() + }, + want_pass: true, + }, + Test { + name: "Empty proof 
acked".to_string(), + raw: RawMsgAcknowledgement { proof_acked: Vec::new(), ..default_raw_msg }, + want_pass: false, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( res_msg.is_ok(), test.want_pass, "MsgAcknowledgement::try_from failed for test {} \nraw message: {:?} with error: {:?}", @@ -230,6 +212,6 @@ mod test { test.raw, res_msg.err() ); - } - } + } + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs index 2dc6bd27dc..70eab6b1d3 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_confirm.rs @@ -4,138 +4,132 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelCloseConfirm"; /// /// Message definition for the second step in the channel close handshake (the `ChanCloseConfirm` /// datagram). 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelCloseConfirm { - pub port_id: PortId, - pub channel_id: ChannelId, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelCloseConfirm { - pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { - Self { - port_id, - channel_id, - proofs, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { + Self { port_id, channel_id, proofs, signer } + } } impl Msg for MsgChannelCloseConfirm { - type ValidationError = Error; - type Raw = RawMsgChannelCloseConfirm; + type ValidationError = Error; + type Raw = RawMsgChannelCloseConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelCloseConfirm {} impl TryFrom for MsgChannelCloseConfirm { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelCloseConfirm) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_init - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgChannelCloseConfirm { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelCloseConfirm) -> Result { + let proofs = Proofs::new( + raw_msg.proof_init.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgChannelCloseConfirm { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelCloseConfirm { - fn from(domain_msg: MsgChannelCloseConfirm) -> Self { - RawMsgChannelCloseConfirm { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - proof_init: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelCloseConfirm) -> Self { + RawMsgChannelCloseConfirm { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + proof_init: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; - use ibc_proto::ibc::core::client::v1::Height; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgChannelCloseConfirm`, for testing only! 
- pub fn get_dummy_raw_msg_chan_close_confirm(proof_height: u64) -> RawMsgChannelCloseConfirm { - RawMsgChannelCloseConfirm { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm, client::v1::Height, + }; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgChannelCloseConfirm`, for testing only! + pub fn get_dummy_raw_msg_chan_close_confirm(proof_height: u64) -> RawMsgChannelCloseConfirm { + RawMsgChannelCloseConfirm { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelCloseConfirm as RawMsgChannelCloseConfirm, client::v1::Height, + }; - use crate::core::ics04_channel::msgs::chan_close_confirm::test_util::get_dummy_raw_msg_chan_close_confirm; - use crate::core::ics04_channel::msgs::chan_close_confirm::MsgChannelCloseConfirm; + use crate::core::ics04_channel::msgs::chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }; - #[test] - fn parse_channel_close_confirm_msg() { - struct Test { - name: String, - raw: RawMsgChannelCloseConfirm, - want_pass: bool, - } + #[test] + fn parse_channel_close_confirm_msg() { + struct Test 
{ + name: String, + raw: RawMsgChannelCloseConfirm, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_msg_chan_close_confirm(proof_height); + let proof_height = 10; + let default_raw_msg = get_dummy_raw_msg_chan_close_confirm(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -208,10 +202,10 @@ mod tests { .into_iter() .collect(); - for test in tests { - let msg = MsgChannelCloseConfirm::try_from(test.raw.clone()); + for test in tests { + let msg = MsgChannelCloseConfirm::try_from(test.raw.clone()); - assert_eq!( + assert_eq!( test.want_pass, msg.is_ok(), "MsgChanCloseConfirm::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", @@ -219,16 +213,16 @@ mod tests { test.raw, msg.err(), ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_close_confirm(19); - let msg = MsgChannelCloseConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelCloseConfirm::from(msg.clone()); - let msg_back = MsgChannelCloseConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_close_confirm(19); + let msg = MsgChannelCloseConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelCloseConfirm::from(msg.clone()); + let msg_back = MsgChannelCloseConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_close_init.rs b/modules/src/core/ics04_channel/msgs/chan_close_init.rs index ceb85b5421..192e3d04ae 100644 --- a/modules/src/core/ics04_channel/msgs/chan_close_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_close_init.rs @@ -4,110 +4,112 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; -use 
crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelCloseInit"; /// /// Message definition for the first step in the channel close handshake (`ChanCloseInit` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelCloseInit { - pub port_id: PortId, - pub channel_id: ChannelId, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub signer: Signer, } impl MsgChannelCloseInit { - pub fn new(port_id: PortId, channel_id: ChannelId, signer: Signer) -> Self { - Self { - port_id, - channel_id, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, signer: Signer) -> Self { + Self { port_id, channel_id, signer } + } } impl Msg for MsgChannelCloseInit { - type ValidationError = Error; - type Raw = RawMsgChannelCloseInit; + type ValidationError = Error; + type Raw = RawMsgChannelCloseInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelCloseInit {} impl TryFrom for MsgChannelCloseInit { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelCloseInit) -> Result { - Ok(MsgChannelCloseInit { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelCloseInit) -> Result { + Ok(MsgChannelCloseInit { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + 
channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelCloseInit { - fn from(domain_msg: MsgChannelCloseInit) -> Self { - RawMsgChannelCloseInit { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelCloseInit) -> Self { + RawMsgChannelCloseInit { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::get_dummy_bech32_account; - - /// Returns a dummy `RawMsgChannelCloseInit`, for testing only! - pub fn get_dummy_raw_msg_chan_close_init() -> RawMsgChannelCloseInit { - RawMsgChannelCloseInit { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::get_dummy_bech32_account, + }; + + /// Returns a dummy `RawMsgChannelCloseInit`, for testing only! 
+ pub fn get_dummy_raw_msg_chan_close_init() -> RawMsgChannelCloseInit { + RawMsgChannelCloseInit { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; + use ibc_proto::ibc::core::channel::v1::MsgChannelCloseInit as RawMsgChannelCloseInit; - use crate::core::ics04_channel::msgs::chan_close_init::test_util::get_dummy_raw_msg_chan_close_init; - use crate::core::ics04_channel::msgs::chan_close_init::MsgChannelCloseInit; + use crate::core::ics04_channel::msgs::chan_close_init::{ + test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit, + }; - #[test] - fn parse_channel_close_init_msg() { - struct Test { - name: String, - raw: RawMsgChannelCloseInit, - want_pass: bool, - } + #[test] + fn parse_channel_close_init_msg() { + struct Test { + name: String, + raw: RawMsgChannelCloseInit, + want_pass: bool, + } - let default_raw_msg = get_dummy_raw_msg_chan_close_init(); + let default_raw_msg = get_dummy_raw_msg_chan_close_init(); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -165,27 +167,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let msg = MsgChannelCloseInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - msg.is_ok(), - "MsgChanCloseInit::try_from failed for test {}, \nmsg {:?} with error {:?}", - test.name, - test.raw, - msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_close_init(); - let msg = MsgChannelCloseInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelCloseInit::from(msg.clone()); - let msg_back = MsgChannelCloseInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - 
assert_eq!(msg, msg_back); - } + for test in tests { + let msg = MsgChannelCloseInit::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + msg.is_ok(), + "MsgChanCloseInit::try_from failed for test {}, \nmsg {:?} with error {:?}", + test.name, + test.raw, + msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_close_init(); + let msg = MsgChannelCloseInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelCloseInit::from(msg.clone()); + let msg_back = MsgChannelCloseInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs index 14cd6d85c6..a591e26e2f 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_ack.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_ack.rs @@ -1,10 +1,13 @@ -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::Version; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{error::Error, Version}, + ics24_host::identifier::{ChannelId, PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; use tendermint_proto::Protobuf; @@ -13,145 +16,134 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenAck"; /// /// Message definition for the third step in the channel open handshake (`ChanOpenAck` datagram). 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenAck { - pub port_id: PortId, - pub channel_id: ChannelId, - pub counterparty_channel_id: ChannelId, - pub counterparty_version: Version, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub counterparty_channel_id: ChannelId, + pub counterparty_version: Version, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenAck { - pub fn new( - port_id: PortId, - channel_id: ChannelId, - counterparty_channel_id: ChannelId, - counterparty_version: Version, - proofs: Proofs, - signer: Signer, - ) -> Self { - Self { - port_id, - channel_id, - counterparty_channel_id, - counterparty_version, - proofs, - signer, - } - } + pub fn new( + port_id: PortId, + channel_id: ChannelId, + counterparty_channel_id: ChannelId, + counterparty_version: Version, + proofs: Proofs, + signer: Signer, + ) -> Self { + Self { port_id, channel_id, counterparty_channel_id, counterparty_version, proofs, signer } + } } impl Msg for MsgChannelOpenAck { - type ValidationError = Error; - type Raw = RawMsgChannelOpenAck; + type ValidationError = Error; + type Raw = RawMsgChannelOpenAck; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenAck {} impl TryFrom for MsgChannelOpenAck { - type Error = Error; + type Error = Error; - fn try_from(raw_msg: RawMsgChannelOpenAck) -> Result { - let proofs = Proofs::new( - raw_msg.proof_try.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; + fn try_from(raw_msg: RawMsgChannelOpenAck) -> Result { + let proofs = Proofs::new( + raw_msg.proof_try.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; - Ok(MsgChannelOpenAck { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - counterparty_channel_id: raw_msg - .counterparty_channel_id - .parse() - .map_err(Error::identifier)?, - counterparty_version: raw_msg.counterparty_version.into(), - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + Ok(MsgChannelOpenAck { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + counterparty_channel_id: raw_msg + .counterparty_channel_id + .parse() + .map_err(Error::identifier)?, + counterparty_version: raw_msg.counterparty_version.into(), + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenAck { - fn from(domain_msg: MsgChannelOpenAck) -> Self { - RawMsgChannelOpenAck { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - counterparty_channel_id: domain_msg.counterparty_channel_id.to_string(), - counterparty_version: domain_msg.counterparty_version.to_string(), - proof_try: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenAck) -> Self { + RawMsgChannelOpenAck { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + counterparty_channel_id: domain_msg.counterparty_channel_id.to_string(), + counterparty_version: domain_msg.counterparty_version.to_string(), + proof_try: 
domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; - /// Returns a dummy `RawMsgChannelOpenAck`, for testing only! - pub fn get_dummy_raw_msg_chan_open_ack(proof_height: u64) -> RawMsgChannelOpenAck { - RawMsgChannelOpenAck { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - counterparty_channel_id: ChannelId::default().to_string(), - counterparty_version: "".to_string(), - proof_try: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + /// Returns a dummy `RawMsgChannelOpenAck`, for testing only! 
+ pub fn get_dummy_raw_msg_chan_open_ack(proof_height: u64) -> RawMsgChannelOpenAck { + RawMsgChannelOpenAck { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + counterparty_channel_id: ChannelId::default().to_string(), + counterparty_version: "".to_string(), + proof_try: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; - use test_log::test; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenAck as RawMsgChannelOpenAck; + use test_log::test; - use crate::core::ics04_channel::msgs::chan_open_ack::test_util::get_dummy_raw_msg_chan_open_ack; - use crate::core::ics04_channel::msgs::chan_open_ack::MsgChannelOpenAck; + use crate::core::ics04_channel::msgs::chan_open_ack::{ + test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck, + }; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::client::v1::Height; - #[test] - fn parse_channel_open_ack_msg() { - struct Test { - name: String, - raw: RawMsgChannelOpenAck, - want_pass: bool, - } + #[test] + fn parse_channel_open_ack_msg() { + struct Test { + name: String, + raw: RawMsgChannelOpenAck, + want_pass: bool, + } - let proof_height = 20; - let default_raw_msg = get_dummy_raw_msg_chan_open_ack(proof_height); + let proof_height = 20; + let default_raw_msg = get_dummy_raw_msg_chan_open_ack(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -276,27 +268,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenAck::try_from(test.raw.clone()); + for test in tests { + let res_msg = MsgChannelOpenAck::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - 
res_msg.is_ok(), - "MsgChanOpenAck::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenAck::try_from raw failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_ack(100); - let msg = MsgChannelOpenAck::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenAck::from(msg.clone()); - let msg_back = MsgChannelOpenAck::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_ack(100); + let msg = MsgChannelOpenAck::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenAck::from(msg.clone()); + let msg_back = MsgChannelOpenAck::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs index 7ad004adb3..6cc73b1cc9 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_confirm.rs @@ -1,9 +1,13 @@ -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; use tendermint_proto::Protobuf; @@ -13,125 +17,116 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenConfirm"; /// /// Message definition for the fourth step in the 
channel open handshake (`ChanOpenConfirm` /// datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenConfirm { - pub port_id: PortId, - pub channel_id: ChannelId, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel_id: ChannelId, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenConfirm { - pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { - Self { - port_id, - channel_id, - proofs, - signer, - } - } + pub fn new(port_id: PortId, channel_id: ChannelId, proofs: Proofs, signer: Signer) -> Self { + Self { port_id, channel_id, proofs, signer } + } } impl Msg for MsgChannelOpenConfirm { - type ValidationError = Error; - type Raw = RawMsgChannelOpenConfirm; + type ValidationError = Error; + type Raw = RawMsgChannelOpenConfirm; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenConfirm {} impl TryFrom for MsgChannelOpenConfirm { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelOpenConfirm) -> Result { - let proofs = Proofs::new( - raw_msg.proof_ack.try_into().map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? 
- .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgChannelOpenConfirm { - port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, - channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelOpenConfirm) -> Result { + let proofs = Proofs::new( + raw_msg.proof_ack.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgChannelOpenConfirm { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel_id: raw_msg.channel_id.parse().map_err(Error::identifier)?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenConfirm { - fn from(domain_msg: MsgChannelOpenConfirm) -> Self { - RawMsgChannelOpenConfirm { - port_id: domain_msg.port_id.to_string(), - channel_id: domain_msg.channel_id.to_string(), - proof_ack: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenConfirm) -> Self { + RawMsgChannelOpenConfirm { + port_id: domain_msg.port_id.to_string(), + channel_id: domain_msg.channel_id.to_string(), + proof_ack: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; - - /// Returns a dummy `RawMsgChannelOpenConfirm`, for testing only! 
- pub fn get_dummy_raw_msg_chan_open_confirm(proof_height: u64) -> RawMsgChannelOpenConfirm { - RawMsgChannelOpenConfirm { - port_id: PortId::default().to_string(), - channel_id: ChannelId::default().to_string(), - proof_ack: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; + + use crate::{ + core::ics24_host::identifier::{ChannelId, PortId}, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; + + /// Returns a dummy `RawMsgChannelOpenConfirm`, for testing only! + pub fn get_dummy_raw_msg_chan_open_confirm(proof_height: u64) -> RawMsgChannelOpenConfirm { + RawMsgChannelOpenConfirm { + port_id: PortId::default().to_string(), + channel_id: ChannelId::default().to_string(), + proof_ack: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; - use test_log::test; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenConfirm as RawMsgChannelOpenConfirm; + use test_log::test; - use crate::core::ics04_channel::msgs::chan_open_confirm::test_util::get_dummy_raw_msg_chan_open_confirm; - use crate::core::ics04_channel::msgs::chan_open_confirm::MsgChannelOpenConfirm; + use crate::core::ics04_channel::msgs::chan_open_confirm::{ + test_util::get_dummy_raw_msg_chan_open_confirm, MsgChannelOpenConfirm, + }; - use ibc_proto::ibc::core::client::v1::Height; + use ibc_proto::ibc::core::client::v1::Height; - #[test] - fn parse_channel_open_confirm_msg() { - struct Test { - name: String, - raw: RawMsgChannelOpenConfirm, - want_pass: bool, - } + 
#[test] + fn parse_channel_open_confirm_msg() { + struct Test { + name: String, + raw: RawMsgChannelOpenConfirm, + want_pass: bool, + } - let proof_height = 78; - let default_raw_msg = get_dummy_raw_msg_chan_open_confirm(proof_height); + let proof_height = 78; + let default_raw_msg = get_dummy_raw_msg_chan_open_confirm(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -208,27 +203,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenConfirm::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenConfirm::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_confirm(19); - let msg = MsgChannelOpenConfirm::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenConfirm::from(msg.clone()); - let msg_back = MsgChannelOpenConfirm::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + for test in tests { + let res_msg = MsgChannelOpenConfirm::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenConfirm::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_confirm(19); + let msg = MsgChannelOpenConfirm::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenConfirm::from(msg.clone()); + let msg_back = MsgChannelOpenConfirm::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_init.rs b/modules/src/core/ics04_channel/msgs/chan_open_init.rs index b5b4130a90..c12d3b82a6 100644 --- 
a/modules/src/core/ics04_channel/msgs/chan_open_init.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_init.rs @@ -1,9 +1,12 @@ -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::PortId; -use crate::prelude::*; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{channel::ChannelEnd, error::Error}, + ics24_host::identifier::PortId, + }, + prelude::*, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; use tendermint_proto::Protobuf; @@ -12,149 +15,145 @@ pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenInit"; /// /// Message definition for the first step in the channel open handshake (`ChanOpenInit` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenInit { - pub port_id: PortId, - pub channel: ChannelEnd, - pub signer: Signer, + pub port_id: PortId, + pub channel: ChannelEnd, + pub signer: Signer, } impl MsgChannelOpenInit { - pub fn new(port_id: PortId, channel: ChannelEnd, signer: Signer) -> Self { - Self { - port_id, - channel, - signer, - } - } + pub fn new(port_id: PortId, channel: ChannelEnd, signer: Signer) -> Self { + Self { port_id, channel, signer } + } } impl Msg for MsgChannelOpenInit { - type ValidationError = Error; - type Raw = RawMsgChannelOpenInit; + type ValidationError = Error; + type Raw = RawMsgChannelOpenInit; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgChannelOpenInit {} impl TryFrom for MsgChannelOpenInit { - type Error = Error; - - fn try_from(raw_msg: RawMsgChannelOpenInit) -> Result { - Ok(MsgChannelOpenInit { - port_id: 
raw_msg.port_id.parse().map_err(Error::identifier)?, - channel: raw_msg - .channel - .ok_or_else(Error::missing_channel)? - .try_into()?, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgChannelOpenInit) -> Result { + Ok(MsgChannelOpenInit { + port_id: raw_msg.port_id.parse().map_err(Error::identifier)?, + channel: raw_msg.channel.ok_or_else(Error::missing_channel)?.try_into()?, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgChannelOpenInit { - fn from(domain_msg: MsgChannelOpenInit) -> Self { - RawMsgChannelOpenInit { - port_id: domain_msg.port_id.to_string(), - channel: Some(domain_msg.channel.into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenInit) -> Self { + RawMsgChannelOpenInit { + port_id: domain_msg.port_id.to_string(), + channel: Some(domain_msg.channel.into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; - - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics24_host::identifier::PortId; - use crate::test_utils::get_dummy_bech32_account; - - /// Returns a dummy `RawMsgChannelOpenInit`, for testing only! - pub fn get_dummy_raw_msg_chan_open_init() -> RawMsgChannelOpenInit { - RawMsgChannelOpenInit { - port_id: PortId::default().to_string(), - channel: Some(get_dummy_raw_channel_end()), - signer: get_dummy_bech32_account(), - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; + + use crate::{ + core::{ + ics04_channel::channel::test_util::get_dummy_raw_channel_end, + ics24_host::identifier::PortId, + }, + test_utils::get_dummy_bech32_account, + }; + + /// Returns a dummy `RawMsgChannelOpenInit`, for testing only! 
+ pub fn get_dummy_raw_msg_chan_open_init() -> RawMsgChannelOpenInit { + RawMsgChannelOpenInit { + port_id: PortId::default().to_string(), + channel: Some(get_dummy_raw_channel_end()), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_open_init::test_util::get_dummy_raw_msg_chan_open_init; - use crate::core::ics04_channel::msgs::chan_open_init::MsgChannelOpenInit; - use crate::prelude::*; - - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; - use test_log::test; - - #[test] - fn channel_open_init_from_raw() { - struct Test { - name: String, - raw: RawMsgChannelOpenInit, - want_pass: bool, - } - - let default_raw_msg = get_dummy_raw_msg_chan_open_init(); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Incorrect port identifier, slash (separator) prohibited".to_string(), - raw: RawMsgChannelOpenInit { - port_id: "p34/".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing channel".to_string(), - raw: RawMsgChannelOpenInit { - channel: None, - ..default_raw_msg - }, - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res_msg = MsgChannelOpenInit::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenInit::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_init(); - let msg = MsgChannelOpenInit::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenInit::from(msg.clone()); - let msg_back = MsgChannelOpenInit::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::{ + core::ics04_channel::msgs::chan_open_init::{ + 
test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit, + }, + prelude::*, + }; + + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenInit as RawMsgChannelOpenInit; + use test_log::test; + + #[test] + fn channel_open_init_from_raw() { + struct Test { + name: String, + raw: RawMsgChannelOpenInit, + want_pass: bool, + } + + let default_raw_msg = get_dummy_raw_msg_chan_open_init(); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Incorrect port identifier, slash (separator) prohibited".to_string(), + raw: RawMsgChannelOpenInit { + port_id: "p34/".to_string(), + ..default_raw_msg.clone() + }, + want_pass: false, + }, + Test { + name: "Missing channel".to_string(), + raw: RawMsgChannelOpenInit { channel: None, ..default_raw_msg }, + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res_msg = MsgChannelOpenInit::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenInit::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_init(); + let msg = MsgChannelOpenInit::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenInit::from(msg.clone()); + let msg_back = MsgChannelOpenInit::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/chan_open_try.rs b/modules/src/core/ics04_channel/msgs/chan_open_try.rs index 5af004d529..cbfd154ee9 100644 --- a/modules/src/core/ics04_channel/msgs/chan_open_try.rs +++ b/modules/src/core/ics04_channel/msgs/chan_open_try.rs @@ -1,184 +1,159 @@ -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::error::Error as ChannelError; -use crate::core::ics04_channel::Version; -use 
crate::core::ics24_host::error::ValidationError; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::prelude::*; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::{ + ics04_channel::{channel::ChannelEnd, error::Error as ChannelError, Version}, + ics24_host::{error::ValidationError, identifier::PortId}, + }, + prelude::*, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; use tendermint_proto::Protobuf; -use core::str::FromStr; - pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgChannelOpenTry"; /// /// Message definition for the second step in the channel open handshake (`ChanOpenTry` datagram). -/// #[derive(Clone, Debug, PartialEq)] pub struct MsgChannelOpenTry { - pub port_id: PortId, - pub previous_channel_id: Option, - pub channel: ChannelEnd, - pub counterparty_version: Version, - pub proofs: Proofs, - pub signer: Signer, + pub port_id: PortId, + pub channel: ChannelEnd, + pub counterparty_version: Version, + pub proofs: Proofs, + pub signer: Signer, } impl MsgChannelOpenTry { - pub fn new( - port_id: PortId, - previous_channel_id: Option, - channel: ChannelEnd, - counterparty_version: Version, - proofs: Proofs, - signer: Signer, - ) -> Self { - Self { - port_id, - previous_channel_id, - channel, - counterparty_version, - proofs, - signer, - } - } + pub fn new( + port_id: PortId, + channel: ChannelEnd, + counterparty_version: Version, + proofs: Proofs, + signer: Signer, + ) -> Self { + Self { port_id, channel, counterparty_version, proofs, signer } + } } impl Msg for MsgChannelOpenTry { - type ValidationError = ChannelError; - type Raw = RawMsgChannelOpenTry; + type ValidationError = ChannelError; + type Raw = RawMsgChannelOpenTry; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> 
String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } - fn validate_basic(&self) -> Result<(), ValidationError> { - match self.channel.counterparty().channel_id() { - None => Err(ValidationError::invalid_counterparty_channel_id()), - Some(_c) => Ok(()), - } - } + fn validate_basic(&self) -> Result<(), ValidationError> { + match self.channel.counterparty().channel_id() { + None => Err(ValidationError::invalid_counterparty_channel_id()), + Some(_c) => Ok(()), + } + } } impl Protobuf for MsgChannelOpenTry {} impl TryFrom for MsgChannelOpenTry { - type Error = ChannelError; + type Error = ChannelError; - fn try_from(raw_msg: RawMsgChannelOpenTry) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_init - .try_into() - .map_err(ChannelError::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(ChannelError::missing_height)? - .into(), - ) - .map_err(ChannelError::invalid_proof)?; + fn try_from(raw_msg: RawMsgChannelOpenTry) -> Result { + let proofs = Proofs::new( + raw_msg.proof_init.try_into().map_err(ChannelError::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(ChannelError::missing_height)?.into(), + ) + .map_err(ChannelError::invalid_proof)?; - let previous_channel_id = Some(raw_msg.previous_channel_id) - .filter(|x| !x.is_empty()) - .map(|v| FromStr::from_str(v.as_str())) - .transpose() - .map_err(ChannelError::identifier)?; + let msg = MsgChannelOpenTry { + port_id: raw_msg.port_id.parse().map_err(ChannelError::identifier)?, + channel: raw_msg.channel.ok_or_else(ChannelError::missing_channel)?.try_into()?, + counterparty_version: raw_msg.counterparty_version.into(), + proofs, + signer: raw_msg.signer.parse().map_err(ChannelError::signer)?, + }; - let msg = MsgChannelOpenTry { - port_id: raw_msg.port_id.parse().map_err(ChannelError::identifier)?, - previous_channel_id, - channel: raw_msg - .channel - .ok_or_else(ChannelError::missing_channel)? 
- .try_into()?, - counterparty_version: raw_msg.counterparty_version.into(), - proofs, - signer: raw_msg.signer.parse().map_err(ChannelError::signer)?, - }; + msg.validate_basic().map_err(ChannelError::invalid_counterparty_channel_id)?; - msg.validate_basic() - .map_err(ChannelError::invalid_counterparty_channel_id)?; - - Ok(msg) - } + Ok(msg) + } } impl From for RawMsgChannelOpenTry { - fn from(domain_msg: MsgChannelOpenTry) -> Self { - RawMsgChannelOpenTry { - port_id: domain_msg.port_id.to_string(), - previous_channel_id: domain_msg - .previous_channel_id - .map_or_else(|| "".to_string(), |v| v.to_string()), - channel: Some(domain_msg.channel.into()), - counterparty_version: domain_msg.counterparty_version.to_string(), - proof_init: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgChannelOpenTry) -> Self { + RawMsgChannelOpenTry { + port_id: domain_msg.port_id.to_string(), + channel: Some(domain_msg.channel.into()), + counterparty_version: domain_msg.counterparty_version.to_string(), + proof_init: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + ..Default::default() + } + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; + use crate::prelude::*; + use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; - use crate::core::ics04_channel::channel::test_util::get_dummy_raw_channel_end; - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use ibc_proto::ibc::core::client::v1::Height; + use crate::{ + core::{ + ics04_channel::channel::test_util::get_dummy_raw_channel_end, + ics24_host::identifier::PortId, + }, + 
test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + use ibc_proto::ibc::core::client::v1::Height; - /// Returns a dummy `RawMsgChannelOpenTry`, for testing only! - pub fn get_dummy_raw_msg_chan_open_try(proof_height: u64) -> RawMsgChannelOpenTry { - RawMsgChannelOpenTry { - port_id: PortId::default().to_string(), - previous_channel_id: ChannelId::default().to_string(), - channel: Some(get_dummy_raw_channel_end()), - counterparty_version: "".to_string(), - proof_init: get_dummy_proof(), - proof_height: Some(Height { - revision_number: 0, - revision_height: proof_height, - }), - signer: get_dummy_bech32_account(), - } - } + /// Returns a dummy `RawMsgChannelOpenTry`, for testing only! + pub fn get_dummy_raw_msg_chan_open_try(proof_height: u64) -> RawMsgChannelOpenTry { + RawMsgChannelOpenTry { + port_id: PortId::default().to_string(), + channel: Some(get_dummy_raw_channel_end()), + counterparty_version: "".to_string(), + proof_init: get_dummy_proof(), + proof_height: Some(Height { revision_number: 0, revision_height: proof_height }), + signer: get_dummy_bech32_account(), + ..Default::default() + } + } } #[cfg(test)] mod tests { - use crate::core::ics04_channel::msgs::chan_open_try::test_util::get_dummy_raw_msg_chan_open_try; - use crate::core::ics04_channel::msgs::chan_open_try::MsgChannelOpenTry; - use crate::prelude::*; + use crate::{ + core::ics04_channel::msgs::chan_open_try::{ + test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry, + }, + prelude::*, + }; - use ibc_proto::ibc::core::channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry; - use ibc_proto::ibc::core::client::v1::Height; - use test_log::test; + use ibc_proto::ibc::core::{ + channel::v1::MsgChannelOpenTry as RawMsgChannelOpenTry, client::v1::Height, + }; + use test_log::test; - #[test] - fn channel_open_try_from_raw() { - struct Test { - name: String, - raw: RawMsgChannelOpenTry, - want_pass: bool, - } + #[test] + fn channel_open_try_from_raw() { + struct Test { + name: String, 
+ raw: RawMsgChannelOpenTry, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_msg_chan_open_try(proof_height); + let proof_height = 10; + let default_raw_msg = get_dummy_raw_msg_chan_open_try(proof_height); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -211,27 +186,10 @@ mod tests { Test { name: "Correct channel identifier".to_string(), raw: RawMsgChannelOpenTry { - previous_channel_id: "channel-34".to_string(), ..default_raw_msg.clone() }, want_pass: true, }, - Test { - name: "Bad channel, name too short".to_string(), - raw: RawMsgChannelOpenTry { - previous_channel_id: "chshort".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Bad channel, name too long".to_string(), - raw: RawMsgChannelOpenTry { - previous_channel_id: "channel-12839128379182739812739879".to_string(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, Test { name: "Empty counterparty version (valid choice)".to_string(), raw: RawMsgChannelOpenTry { @@ -279,27 +237,27 @@ mod tests { .into_iter() .collect(); - for test in tests { - let res_msg = MsgChannelOpenTry::try_from(test.raw.clone()); + for test in tests { + let res_msg = MsgChannelOpenTry::try_from(test.raw.clone()); - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "MsgChanOpenTry::try_from failed for test {}, \nraw msg {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "MsgChanOpenTry::try_from failed for test {}, \nraw msg {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_chan_open_try(10); - let msg = MsgChannelOpenTry::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgChannelOpenTry::from(msg.clone()); - let msg_back = MsgChannelOpenTry::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, 
raw_back); - assert_eq!(msg, msg_back); - } + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_chan_open_try(10); + let msg = MsgChannelOpenTry::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgChannelOpenTry::from(msg.clone()); + let msg_back = MsgChannelOpenTry::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/recv_packet.rs b/modules/src/core/ics04_channel/msgs/recv_packet.rs index 7aad7c6eec..6a71da410b 100644 --- a/modules/src/core/ics04_channel/msgs/recv_packet.rs +++ b/modules/src/core/ics04_channel/msgs/recv_packet.rs @@ -4,196 +4,173 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::Packet; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{error::Error, packet::Packet}, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgRecvPacket"; /// /// Message definition for the "packet receiving" datagram. 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgRecvPacket { - pub packet: Packet, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub proofs: Proofs, + pub signer: Signer, } impl MsgRecvPacket { - pub fn new(packet: Packet, proofs: Proofs, signer: Signer) -> MsgRecvPacket { - Self { - packet, - proofs, - signer, - } - } + pub fn new(packet: Packet, proofs: Proofs, signer: Signer) -> MsgRecvPacket { + Self { packet, proofs, signer } + } } impl Msg for MsgRecvPacket { - type ValidationError = Error; - type Raw = RawMsgRecvPacket; + type ValidationError = Error; + type Raw = RawMsgRecvPacket; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgRecvPacket {} impl TryFrom for MsgRecvPacket { - type Error = Error; - - fn try_from(raw_msg: RawMsgRecvPacket) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_commitment - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - Ok(MsgRecvPacket { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - proofs, - signer: raw_msg.signer.parse().map_err(Error::signer)?, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgRecvPacket) -> Result { + let proofs = Proofs::new( + raw_msg.proof_commitment.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + Ok(MsgRecvPacket { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + proofs, + signer: raw_msg.signer.parse().map_err(Error::signer)?, + }) + } } impl From for RawMsgRecvPacket { - fn from(domain_msg: MsgRecvPacket) -> Self { - RawMsgRecvPacket { - packet: Some(domain_msg.packet.into()), - proof_commitment: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgRecvPacket) -> Self { + RawMsgRecvPacket { + packet: Some(domain_msg.packet.into()), + proof_commitment: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - use crate::timestamp::Timestamp; - use core::ops::Add; - use core::time::Duration; - - /// Returns a dummy `RawMsgRecvPacket`, for testing only! The `height` parametrizes both the - /// proof height as well as the timeout height. 
- pub fn get_dummy_raw_msg_recv_packet(height: u64) -> RawMsgRecvPacket { - let timestamp = Timestamp::now().add(Duration::from_secs(9)); - RawMsgRecvPacket { - packet: Some(get_dummy_raw_packet( - height, - timestamp.unwrap().nanoseconds(), - )), - proof_commitment: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgRecvPacket as RawMsgRecvPacket, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + timestamp::Timestamp, + }; + use core::{ops::Add, time::Duration}; + + /// Returns a dummy `RawMsgRecvPacket`, for testing only! The `height` parametrizes both the + /// proof height as well as the timeout height. + pub fn get_dummy_raw_msg_recv_packet(height: u64) -> RawMsgRecvPacket { + let timestamp = Timestamp::now().add(Duration::from_secs(9)); + RawMsgRecvPacket { + packet: Some(get_dummy_raw_packet(height, timestamp.unwrap().nanoseconds())), + proof_commitment: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::recv_packet::test_util::get_dummy_raw_msg_recv_packet; - use crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_recv_packet_try_from_raw() { - struct Test { - name: String, - raw: RawMsgRecvPacket, - want_pass: bool, - } - - let height = 20; - let default_raw_msg = get_dummy_raw_msg_recv_packet(height); - let tests: Vec = vec![ - Test { - name: "Good 
parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing proof".to_string(), - raw: RawMsgRecvPacket { - proof_commitment: Vec::new(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgRecvPacket { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgRecvPacket { - signer: get_dummy_bech32_account(), - ..default_raw_msg - }, - want_pass: true, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( - res_msg.is_ok(), - test.want_pass, - "MsgRecvPacket::try_from failed for test {} \nraw message: {:?} with error: {:?}", - test.name, - test.raw, - res_msg.err() - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_recv_packet(15); - let msg = MsgRecvPacket::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgRecvPacket::from(msg.clone()); - let msg_back = MsgRecvPacket::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgRecvPacket as RawMsgRecvPacket; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_recv_packet_try_from_raw() { + struct Test { + name: String, + raw: RawMsgRecvPacket, + want_pass: bool, + } + + let height = 20; + let default_raw_msg = get_dummy_raw_msg_recv_packet(height); + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing proof".to_string(), + raw: RawMsgRecvPacket { proof_commitment: Vec::new(), ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing 
proof height".to_string(), + raw: RawMsgRecvPacket { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgRecvPacket { signer: get_dummy_bech32_account(), ..default_raw_msg }, + want_pass: true, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( + res_msg.is_ok(), + test.want_pass, + "MsgRecvPacket::try_from failed for test {} \nraw message: {:?} with error: {:?}", + test.name, + test.raw, + res_msg.err() + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_recv_packet(15); + let msg = MsgRecvPacket::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgRecvPacket::from(msg.clone()); + let msg_back = MsgRecvPacket::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/timeout.rs b/modules/src/core/ics04_channel/msgs/timeout.rs index a2887e1013..d36dca14da 100644 --- a/modules/src/core/ics04_channel/msgs/timeout.rs +++ b/modules/src/core/ics04_channel/msgs/timeout.rs @@ -4,211 +4,191 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{ + error::Error, + packet::{Packet, Sequence}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgTimeout"; /// /// Message definition for packet timeout domain type. 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgTimeout { - pub packet: Packet, - pub next_sequence_recv: Sequence, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub next_sequence_recv: Sequence, + pub proofs: Proofs, + pub signer: Signer, } impl MsgTimeout { - pub fn new( - packet: Packet, - next_sequence_recv: Sequence, - proofs: Proofs, - signer: Signer, - ) -> MsgTimeout { - Self { - packet, - next_sequence_recv, - proofs, - signer, - } - } + pub fn new( + packet: Packet, + next_sequence_recv: Sequence, + proofs: Proofs, + signer: Signer, + ) -> MsgTimeout { + Self { packet, next_sequence_recv, proofs, signer } + } } impl Msg for MsgTimeout { - type ValidationError = Error; - type Raw = RawMsgTimeout; + type ValidationError = Error; + type Raw = RawMsgTimeout; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgTimeout {} impl TryFrom for MsgTimeout { - type Error = Error; - - fn try_from(raw_msg: RawMsgTimeout) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_unreceived - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - // TODO: Domain type verification for the next sequence: this should probably be > 0. - - Ok(MsgTimeout { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTimeout) -> Result { + let proofs = Proofs::new( + raw_msg.proof_unreceived.try_into().map_err(Error::invalid_proof)?, + None, + None, + None, + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + // TODO: Domain type verification for the next sequence: this should probably be > 0. + + Ok(MsgTimeout { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgTimeout { - fn from(domain_msg: MsgTimeout) -> Self { - RawMsgTimeout { - packet: Some(domain_msg.packet.into()), - proof_unreceived: domain_msg.proofs.object_proof().clone().into(), - proof_height: Some(domain_msg.proofs.height().into()), - next_sequence_recv: domain_msg.next_sequence_recv.into(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgTimeout) -> Self { + RawMsgTimeout { + packet: Some(domain_msg.packet.into()), + proof_unreceived: domain_msg.proofs.object_proof().clone().into(), + proof_height: Some(domain_msg.proofs.height().into()), + next_sequence_recv: domain_msg.next_sequence_recv.into(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgTimeout`, for testing only! - /// The `height` parametrizes both the proof height as well as the timeout height. 
- pub fn get_dummy_raw_msg_timeout(height: u64, timeout_timestamp: u64) -> RawMsgTimeout { - RawMsgTimeout { - packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), - proof_unreceived: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - next_sequence_recv: 1, - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgTimeout as RawMsgTimeout, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgTimeout`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. + pub fn get_dummy_raw_msg_timeout(height: u64, timeout_timestamp: u64) -> RawMsgTimeout { + RawMsgTimeout { + packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), + proof_unreceived: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + next_sequence_recv: 1, + signer: get_dummy_bech32_account(), + } + } } #[cfg(test)] mod test { - use crate::prelude::*; - - use test_log::test; - - use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; - - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::msgs::timeout::test_util::get_dummy_raw_msg_timeout; - use crate::core::ics04_channel::msgs::timeout::MsgTimeout; - use crate::test_utils::get_dummy_bech32_account; - - #[test] - fn msg_timeout_try_from_raw() { - struct Test { - name: String, - raw: RawMsgTimeout, - want_pass: bool, - } - - let height = 50; - let timeout_timestamp = 0; - let default_raw_msg = get_dummy_raw_msg_timeout(height, timeout_timestamp); - - let tests: Vec = vec![ - Test { - name: "Good parameters".to_string(), - raw: default_raw_msg.clone(), - want_pass: true, - }, - Test { - name: "Missing packet".to_string(), - raw: RawMsgTimeout { - packet: 
None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof".to_string(), - raw: RawMsgTimeout { - proof_unreceived: Vec::new(), - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Missing proof height".to_string(), - raw: RawMsgTimeout { - proof_height: None, - ..default_raw_msg.clone() - }, - want_pass: false, - }, - Test { - name: "Empty signer".to_string(), - raw: RawMsgTimeout { - signer: get_dummy_bech32_account(), - ..default_raw_msg - }, - want_pass: true, - }, - ]; - - for test in tests { - let res_msg: Result = test.raw.clone().try_into(); - - assert_eq!( - res_msg.is_ok(), - test.want_pass, - "MsgTimeout::try_from failed for test {} \nraw message: {:?} with error: {:?}", - test.name, - test.raw, - res_msg.err() - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_msg_timeout(15, 0); - let msg = MsgTimeout::try_from(raw.clone()).unwrap(); - let raw_back = RawMsgTimeout::from(msg.clone()); - let msg_back = MsgTimeout::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + use crate::prelude::*; + + use test_log::test; + + use ibc_proto::ibc::core::channel::v1::MsgTimeout as RawMsgTimeout; + + use crate::{ + core::ics04_channel::{ + error::Error, + msgs::timeout::{test_util::get_dummy_raw_msg_timeout, MsgTimeout}, + }, + test_utils::get_dummy_bech32_account, + }; + + #[test] + fn msg_timeout_try_from_raw() { + struct Test { + name: String, + raw: RawMsgTimeout, + want_pass: bool, + } + + let height = 50; + let timeout_timestamp = 0; + let default_raw_msg = get_dummy_raw_msg_timeout(height, timeout_timestamp); + + let tests: Vec = vec![ + Test { + name: "Good parameters".to_string(), + raw: default_raw_msg.clone(), + want_pass: true, + }, + Test { + name: "Missing packet".to_string(), + raw: RawMsgTimeout { packet: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof".to_string(), + raw: 
RawMsgTimeout { proof_unreceived: Vec::new(), ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Missing proof height".to_string(), + raw: RawMsgTimeout { proof_height: None, ..default_raw_msg.clone() }, + want_pass: false, + }, + Test { + name: "Empty signer".to_string(), + raw: RawMsgTimeout { signer: get_dummy_bech32_account(), ..default_raw_msg }, + want_pass: true, + }, + ]; + + for test in tests { + let res_msg: Result = test.raw.clone().try_into(); + + assert_eq!( + res_msg.is_ok(), + test.want_pass, + "MsgTimeout::try_from failed for test {} \nraw message: {:?} with error: {:?}", + test.name, + test.raw, + res_msg.err() + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_msg_timeout(15, 0); + let msg = MsgTimeout::try_from(raw.clone()).unwrap(); + let raw_back = RawMsgTimeout::from(msg.clone()); + let msg_back = MsgTimeout::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } } diff --git a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs index 05fda53a54..84548335f5 100644 --- a/modules/src/core/ics04_channel/msgs/timeout_on_close.rs +++ b/modules/src/core/ics04_channel/msgs/timeout_on_close.rs @@ -3,130 +3,119 @@ use crate::prelude::*; use ibc_proto::ibc::core::channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose; use tendermint_proto::Protobuf; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Packet, Sequence}; -use crate::proofs::Proofs; -use crate::signer::Signer; -use crate::tx_msg::Msg; +use crate::{ + core::ics04_channel::{ + error::Error, + packet::{Packet, Sequence}, + }, + proofs::Proofs, + signer::Signer, + tx_msg::Msg, +}; pub const TYPE_URL: &str = "/ibc.core.channel.v1.MsgTimeoutOnClose"; /// /// Message definition for packet timeout domain type. 
-/// #[derive(Clone, Debug, PartialEq)] pub struct MsgTimeoutOnClose { - pub packet: Packet, - pub next_sequence_recv: Sequence, - pub proofs: Proofs, - pub signer: Signer, + pub packet: Packet, + pub next_sequence_recv: Sequence, + pub proofs: Proofs, + pub signer: Signer, } impl MsgTimeoutOnClose { - pub fn new( - packet: Packet, - next_sequence_recv: Sequence, - proofs: Proofs, - signer: Signer, - ) -> MsgTimeoutOnClose { - Self { - packet, - next_sequence_recv, - proofs, - signer, - } - } + pub fn new( + packet: Packet, + next_sequence_recv: Sequence, + proofs: Proofs, + signer: Signer, + ) -> MsgTimeoutOnClose { + Self { packet, next_sequence_recv, proofs, signer } + } } impl Msg for MsgTimeoutOnClose { - type ValidationError = Error; - type Raw = RawMsgTimeoutOnClose; + type ValidationError = Error; + type Raw = RawMsgTimeoutOnClose; - fn route(&self) -> String { - crate::keys::ROUTER_KEY.to_string() - } + fn route(&self) -> String { + crate::keys::ROUTER_KEY.to_string() + } - fn type_url(&self) -> String { - TYPE_URL.to_string() - } + fn type_url(&self) -> String { + TYPE_URL.to_string() + } } impl Protobuf for MsgTimeoutOnClose {} impl TryFrom for MsgTimeoutOnClose { - type Error = Error; - - fn try_from(raw_msg: RawMsgTimeoutOnClose) -> Result { - let proofs = Proofs::new( - raw_msg - .proof_unreceived - .try_into() - .map_err(Error::invalid_proof)?, - None, - None, - None, - raw_msg - .proof_height - .ok_or_else(Error::missing_height)? - .into(), - ) - .map_err(Error::invalid_proof)?; - - // TODO: Domain type verification for the next sequence: this should probably be > 0. - - Ok(MsgTimeoutOnClose { - packet: raw_msg - .packet - .ok_or_else(Error::missing_packet)? 
- .try_into()?, - next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), - signer: raw_msg.signer.parse().map_err(Error::signer)?, - proofs, - }) - } + type Error = Error; + + fn try_from(raw_msg: RawMsgTimeoutOnClose) -> Result { + let proofs = Proofs::new( + raw_msg.proof_unreceived.try_into().map_err(Error::invalid_proof)?, + None, + None, + Some(raw_msg.proof_close.try_into().map_err(Error::invalid_proof)?), + raw_msg.proof_height.ok_or_else(Error::missing_height)?.into(), + ) + .map_err(Error::invalid_proof)?; + + // TODO: Domain type verification for the next sequence: this should probably be > 0. + + Ok(MsgTimeoutOnClose { + packet: raw_msg.packet.ok_or_else(Error::missing_packet)?.try_into()?, + next_sequence_recv: Sequence::from(raw_msg.next_sequence_recv), + signer: raw_msg.signer.parse().map_err(Error::signer)?, + proofs, + }) + } } impl From for RawMsgTimeoutOnClose { - fn from(domain_msg: MsgTimeoutOnClose) -> Self { - RawMsgTimeoutOnClose { - packet: Some(domain_msg.packet.into()), - proof_unreceived: domain_msg.proofs.object_proof().clone().into(), - proof_close: domain_msg - .proofs - .other_proof() - .clone() - .map_or_else(Vec::new, |v| v.into()), - proof_height: Some(domain_msg.proofs.height().into()), - next_sequence_recv: domain_msg.next_sequence_recv.into(), - signer: domain_msg.signer.to_string(), - } - } + fn from(domain_msg: MsgTimeoutOnClose) -> Self { + RawMsgTimeoutOnClose { + packet: Some(domain_msg.packet.into()), + proof_unreceived: domain_msg.proofs.object_proof().clone().into(), + proof_close: domain_msg + .proofs + .other_proof() + .clone() + .map_or_else(Vec::new, |v| v.into()), + proof_height: Some(domain_msg.proofs.height().into()), + next_sequence_recv: domain_msg.next_sequence_recv.into(), + signer: domain_msg.signer.to_string(), + } + } } #[cfg(test)] pub mod test_util { - use ibc_proto::ibc::core::channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - 
use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::test_utils::{get_dummy_bech32_account, get_dummy_proof}; - - /// Returns a dummy `RawMsgTimeoutOnClose`, for testing only! - /// The `height` parametrizes both the proof height as well as the timeout height. - pub fn get_dummy_raw_msg_timeout_on_close( - height: u64, - timeout_timestamp: u64, - ) -> RawMsgTimeoutOnClose { - RawMsgTimeoutOnClose { - packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), - proof_unreceived: get_dummy_proof(), - proof_close: get_dummy_proof(), - proof_height: Some(RawHeight { - revision_number: 0, - revision_height: height, - }), - next_sequence_recv: 1, - signer: get_dummy_bech32_account(), - } - } + use ibc_proto::ibc::core::{ + channel::v1::MsgTimeoutOnClose as RawMsgTimeoutOnClose, client::v1::Height as RawHeight, + }; + + use crate::{ + core::ics04_channel::packet::test_utils::get_dummy_raw_packet, + test_utils::{get_dummy_bech32_account, get_dummy_proof}, + }; + + /// Returns a dummy `RawMsgTimeoutOnClose`, for testing only! + /// The `height` parametrizes both the proof height as well as the timeout height. 
+ pub fn get_dummy_raw_msg_timeout_on_close( + height: u64, + timeout_timestamp: u64, + ) -> RawMsgTimeoutOnClose { + RawMsgTimeoutOnClose { + packet: Some(get_dummy_raw_packet(height, timeout_timestamp)), + proof_unreceived: get_dummy_proof(), + proof_close: get_dummy_proof(), + proof_height: Some(RawHeight { revision_number: 0, revision_height: height }), + next_sequence_recv: 1, + signer: get_dummy_bech32_account(), + } + } } diff --git a/modules/src/core/ics04_channel/packet.rs b/modules/src/core/ics04_channel/packet.rs index 3d6a49886b..38a008f35a 100644 --- a/modules/src/core/ics04_channel/packet.rs +++ b/modules/src/core/ics04_channel/packet.rs @@ -6,332 +6,353 @@ use serde_derive::{Deserialize, Serialize}; use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; -use crate::core::ics04_channel::error::Error; -use crate::core::ics24_host::identifier::{ChannelId, PortId}; -use crate::events::{extract_attribute, Error as EventError, RawObject}; -use crate::timestamp::{Expiry::Expired, Timestamp}; -use crate::Height; +use crate::{ + core::{ + ics04_channel::error::Error, + ics24_host::identifier::{ChannelId, PortId}, + }, + events::{extract_attribute, Error as EventError, RawObject}, + timestamp::{Expiry::Expired, Timestamp}, + Height, +}; use super::handler::{ - acknowledgement::AckPacketResult, recv_packet::RecvPacketResult, send_packet::SendPacketResult, - timeout::TimeoutPacketResult, write_acknowledgement::WriteAckPacketResult, + acknowledgement::AckPacketResult, recv_packet::RecvPacketResult, send_packet::SendPacketResult, + timeout::TimeoutPacketResult, write_acknowledgement::WriteAckPacketResult, }; /// Enumeration of proof carrying ICS4 message, helper for relayer. 
#[derive(Clone, Debug, PartialEq, Eq)] pub enum PacketMsgType { - Recv, - Ack, - TimeoutUnordered, - TimeoutOrdered, - TimeoutOnClose, + Recv, + Ack, + TimeoutUnordered, + TimeoutOrdered, + TimeoutOnClose, } #[derive(Clone, Debug)] pub enum PacketResult { - Send(SendPacketResult), - Recv(RecvPacketResult), - WriteAck(WriteAckPacketResult), - Ack(AckPacketResult), - Timeout(TimeoutPacketResult), + Send(SendPacketResult), + Recv(RecvPacketResult), + WriteAck(WriteAckPacketResult), + Ack(AckPacketResult), + Timeout(TimeoutPacketResult), } #[derive(Clone, Debug)] pub enum Receipt { - Ok, + Ok, } impl core::fmt::Display for PacketMsgType { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - match self { - PacketMsgType::Recv => write!(f, "(PacketMsgType::Recv)"), - PacketMsgType::Ack => write!(f, "(PacketMsgType::Ack)"), - PacketMsgType::TimeoutUnordered => write!(f, "(PacketMsgType::TimeoutUnordered)"), - PacketMsgType::TimeoutOrdered => write!(f, "(PacketMsgType::TimeoutOrdered)"), - PacketMsgType::TimeoutOnClose => write!(f, "(PacketMsgType::TimeoutOnClose)"), - } - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + PacketMsgType::Recv => write!(f, "(PacketMsgType::Recv)"), + PacketMsgType::Ack => write!(f, "(PacketMsgType::Ack)"), + PacketMsgType::TimeoutUnordered => write!(f, "(PacketMsgType::TimeoutUnordered)"), + PacketMsgType::TimeoutOrdered => write!(f, "(PacketMsgType::TimeoutOrdered)"), + PacketMsgType::TimeoutOnClose => write!(f, "(PacketMsgType::TimeoutOnClose)"), + } + } } /// The sequence number of a packet enforces ordering among packets from the same source. 
#[derive( - Copy, Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize, + Copy, Clone, Debug, Default, PartialEq, Eq, Hash, PartialOrd, Ord, Deserialize, Serialize, )] pub struct Sequence(u64); impl FromStr for Sequence { - type Err = Error; - - fn from_str(s: &str) -> Result { - Ok(Self::from(s.parse::().map_err(|e| { - Error::invalid_string_as_sequence(s.to_string(), e) - })?)) - } + type Err = Error; + + fn from_str(s: &str) -> Result { + Ok(Self::from( + s.parse::() + .map_err(|e| Error::invalid_string_as_sequence(s.to_string(), e))?, + )) + } } impl Sequence { - pub fn is_zero(&self) -> bool { - self.0 == 0 - } + pub fn is_zero(&self) -> bool { + self.0 == 0 + } - pub fn increment(&self) -> Sequence { - Sequence(self.0 + 1) - } + pub fn increment(&self) -> Sequence { + Sequence(self.0 + 1) + } } impl From for Sequence { - fn from(seq: u64) -> Self { - Sequence(seq) - } + fn from(seq: u64) -> Self { + Sequence(seq) + } } impl From for u64 { - fn from(s: Sequence) -> u64 { - s.0 - } + fn from(s: Sequence) -> u64 { + s.0 + } } impl core::fmt::Display for Sequence { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(f, "{}", self.0) + } } #[derive(Clone, Default, Hash, PartialEq, Eq, Deserialize, Serialize)] pub struct Packet { - pub sequence: Sequence, - pub source_port: PortId, - pub source_channel: ChannelId, - pub destination_port: PortId, - pub destination_channel: ChannelId, - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - pub data: Vec, - pub timeout_height: Height, - pub timeout_timestamp: Timestamp, + pub sequence: Sequence, + pub source_port: PortId, + pub source_channel: ChannelId, + pub destination_port: PortId, + pub destination_channel: ChannelId, + #[serde( + serialize_with = "crate::serializers::ser_hex_upper", + deserialize_with = 
"crate::serializers::deser_hex_upper" + )] + pub data: Vec, + pub timeout_height: Height, + pub timeout_timestamp: Timestamp, } struct PacketData<'a>(&'a [u8]); impl<'a> core::fmt::Debug for PacketData<'a> { - fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!(formatter, "{:?}", self.0) - } + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!(formatter, "{:?}", self.0) + } } impl core::fmt::Debug for Packet { - fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - // Remember: if you alter the definition of `Packet`, - // 1. update the formatter debug struct builder calls (return object of - // this function) - // 2. update this destructuring assignment accordingly - let Packet { - sequence: _, - source_port: _, - source_channel: _, - destination_port: _, - destination_channel: _, - data, - timeout_height: _, - timeout_timestamp: _, - } = self; - let data_wrapper = PacketData(data); - - formatter - .debug_struct("Packet") - .field("sequence", &self.sequence) - .field("source_port", &self.source_port) - .field("source_channel", &self.source_channel) - .field("destination_port", &self.destination_port) - .field("destination_channel", &self.destination_channel) - .field("data", &data_wrapper) - .field("timeout_height", &self.timeout_height) - .field("timeout_timestamp", &self.timeout_timestamp) - .finish() - } + fn fmt(&self, formatter: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + // Remember: if you alter the definition of `Packet`, + // 1. update the formatter debug struct builder calls (return object of + // this function) + // 2. 
update this destructuring assignment accordingly + let Packet { + sequence: _, + source_port: _, + source_channel: _, + destination_port: _, + destination_channel: _, + data, + timeout_height: _, + timeout_timestamp: _, + } = self; + let data_wrapper = PacketData(data); + + formatter + .debug_struct("Packet") + .field("sequence", &self.sequence) + .field("source_port", &self.source_port) + .field("source_channel", &self.source_channel) + .field("destination_port", &self.destination_port) + .field("destination_channel", &self.destination_channel) + .field("data", &data_wrapper) + .field("timeout_height", &self.timeout_height) + .field("timeout_timestamp", &self.timeout_timestamp) + .finish() + } +} + +pub enum TimeoutVariant { + Height, + Timestamp, + Both, } impl Packet { - /// Checks whether a packet from a - /// [`SendPacket`](crate::core::ics04_channel::events::SendPacket) - /// event is timed-out relative to the current state of the - /// destination chain. - /// - /// Checks both for time-out relative to the destination chain's - /// current timestamp `dst_chain_ts` as well as relative to - /// the height `dst_chain_height`. - /// - /// Note: a timed-out packet should result in a - /// [`MsgTimeout`](crate::core::ics04_channel::msgs::timeout::MsgTimeout), - /// instead of the common-case where it results in - /// [`MsgRecvPacket`](crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket). - pub fn timed_out(&self, dst_chain_ts: &Timestamp, dst_chain_height: Height) -> bool { - (self.timeout_height != Height::zero() && self.timeout_height < dst_chain_height) - || (self.timeout_timestamp != Timestamp::none() - && dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired) - } + /// Checks whether a packet from a + /// [`SendPacket`](crate::core::ics04_channel::events::SendPacket) + /// event is timed-out relative to the current state of the + /// destination chain. 
+ /// + /// Checks both for time-out relative to the destination chain's + /// current timestamp `dst_chain_ts` as well as relative to + /// the height `dst_chain_height`. + /// + /// Note: a timed-out packet should result in a + /// [`MsgTimeout`](crate::core::ics04_channel::msgs::timeout::MsgTimeout), + /// instead of the common-case where it results in + /// [`MsgRecvPacket`](crate::core::ics04_channel::msgs::recv_packet::MsgRecvPacket). + pub fn timed_out(&self, dst_chain_ts: &Timestamp, dst_chain_height: Height) -> bool { + (self.timeout_height != Height::zero() && self.timeout_height <= dst_chain_height) || + (self.timeout_timestamp != Timestamp::none() && + dst_chain_ts.check_expiry(&self.timeout_timestamp) == Expired) + } + + pub fn timeout_variant( + packet: &Packet, + dst_chain_ts: &Timestamp, + dst_chain_height: Height, + ) -> Option { + let height_timeout = + packet.timeout_height != Height::zero() && packet.timeout_height <= dst_chain_height; + let timestamp_timeout = packet.timeout_timestamp != Timestamp::none() && + (dst_chain_ts.check_expiry(&packet.timeout_timestamp) == Expired); + if height_timeout && !timestamp_timeout { + Some(TimeoutVariant::Height) + } else if timestamp_timeout && !height_timeout { + Some(TimeoutVariant::Timestamp) + } else if timestamp_timeout && height_timeout { + Some(TimeoutVariant::Both) + } else { + None + } + } } /// Custom debug output to omit the packet data impl core::fmt::Display for Packet { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - write!( - f, - "seq:{}, path:{}/{}->{}/{}, toh:{}, tos:{})", - self.sequence, - self.source_channel, - self.source_port, - self.destination_channel, - self.destination_port, - self.timeout_height, - self.timeout_timestamp - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + write!( + f, + "seq:{}, path:{}/{}->{}/{}, toh:{}, tos:{})", + self.sequence, + self.source_channel, + self.source_port, + 
self.destination_channel, + self.destination_port, + self.timeout_height, + self.timeout_timestamp + ) + } } impl TryFrom for Packet { - type Error = Error; - - fn try_from(raw_pkt: RawPacket) -> Result { - if Sequence::from(raw_pkt.sequence).is_zero() { - return Err(Error::zero_packet_sequence()); - } - let packet_timeout_height: Height = raw_pkt - .timeout_height - .ok_or_else(Error::missing_height)? - .into(); - - if packet_timeout_height.is_zero() && raw_pkt.timeout_timestamp == 0 { - return Err(Error::zero_packet_timeout()); - } - if raw_pkt.data.is_empty() { - return Err(Error::zero_packet_data()); - } - - let timeout_timestamp = Timestamp::from_nanoseconds(raw_pkt.timeout_timestamp) - .map_err(Error::invalid_packet_timestamp)?; - - Ok(Packet { - sequence: Sequence::from(raw_pkt.sequence), - source_port: raw_pkt.source_port.parse().map_err(Error::identifier)?, - source_channel: raw_pkt.source_channel.parse().map_err(Error::identifier)?, - destination_port: raw_pkt - .destination_port - .parse() - .map_err(Error::identifier)?, - destination_channel: raw_pkt - .destination_channel - .parse() - .map_err(Error::identifier)?, - data: raw_pkt.data, - timeout_height: packet_timeout_height, - timeout_timestamp, - }) - } + type Error = Error; + + fn try_from(raw_pkt: RawPacket) -> Result { + if Sequence::from(raw_pkt.sequence).is_zero() { + return Err(Error::zero_packet_sequence()) + } + let packet_timeout_height: Height = + raw_pkt.timeout_height.ok_or_else(Error::missing_height)?.into(); + + if packet_timeout_height.is_zero() && raw_pkt.timeout_timestamp == 0 { + return Err(Error::zero_packet_timeout()) + } + if raw_pkt.data.is_empty() { + return Err(Error::zero_packet_data()) + } + + let timeout_timestamp = Timestamp::from_nanoseconds(raw_pkt.timeout_timestamp) + .map_err(Error::invalid_packet_timestamp)?; + + Ok(Packet { + sequence: Sequence::from(raw_pkt.sequence), + source_port: raw_pkt.source_port.parse().map_err(Error::identifier)?, + source_channel: 
raw_pkt.source_channel.parse().map_err(Error::identifier)?, + destination_port: raw_pkt.destination_port.parse().map_err(Error::identifier)?, + destination_channel: raw_pkt.destination_channel.parse().map_err(Error::identifier)?, + data: raw_pkt.data, + timeout_height: packet_timeout_height, + timeout_timestamp, + }) + } } impl TryFrom> for Packet { - type Error = EventError; - fn try_from(obj: RawObject<'_>) -> Result { - Ok(Packet { - sequence: extract_attribute(&obj, &format!("{}.packet_sequence", obj.action))? - .parse() - .map_err(EventError::channel)?, - source_port: extract_attribute(&obj, &format!("{}.packet_src_port", obj.action))? - .parse() - .map_err(EventError::parse)?, - source_channel: extract_attribute(&obj, &format!("{}.packet_src_channel", obj.action))? - .parse() - .map_err(EventError::parse)?, - destination_port: extract_attribute(&obj, &format!("{}.packet_dst_port", obj.action))? - .parse() - .map_err(EventError::parse)?, - destination_channel: extract_attribute( - &obj, - &format!("{}.packet_dst_channel", obj.action), - )? - .parse() - .map_err(EventError::parse)?, - data: vec![], - timeout_height: extract_attribute( - &obj, - &format!("{}.packet_timeout_height", obj.action), - )? - .parse() - .map_err(EventError::height)?, - timeout_timestamp: extract_attribute( - &obj, - &format!("{}.packet_timeout_timestamp", obj.action), - )? - .parse() - .map_err(EventError::timestamp)?, - }) - } + type Error = EventError; + fn try_from(obj: RawObject<'_>) -> Result { + Ok(Packet { + sequence: extract_attribute(&obj, &format!("{}.packet_sequence", obj.action))? + .parse() + .map_err(EventError::channel)?, + source_port: extract_attribute(&obj, &format!("{}.packet_src_port", obj.action))? + .parse() + .map_err(EventError::parse)?, + source_channel: extract_attribute(&obj, &format!("{}.packet_src_channel", obj.action))? + .parse() + .map_err(EventError::parse)?, + destination_port: extract_attribute(&obj, &format!("{}.packet_dst_port", obj.action))? 
+ .parse() + .map_err(EventError::parse)?, + destination_channel: extract_attribute( + &obj, + &format!("{}.packet_dst_channel", obj.action), + )? + .parse() + .map_err(EventError::parse)?, + data: vec![], + timeout_height: extract_attribute( + &obj, + &format!("{}.packet_timeout_height", obj.action), + )? + .parse() + .map_err(EventError::height)?, + timeout_timestamp: extract_attribute( + &obj, + &format!("{}.packet_timeout_timestamp", obj.action), + )? + .parse() + .map_err(EventError::timestamp)?, + }) + } } impl From for RawPacket { - fn from(packet: Packet) -> Self { - RawPacket { - sequence: packet.sequence.0, - source_port: packet.source_port.to_string(), - source_channel: packet.source_channel.to_string(), - destination_port: packet.destination_port.to_string(), - destination_channel: packet.destination_channel.to_string(), - data: packet.data, - timeout_height: Some(packet.timeout_height.into()), - timeout_timestamp: packet.timeout_timestamp.nanoseconds(), - } - } + fn from(packet: Packet) -> Self { + RawPacket { + sequence: packet.sequence.0, + source_port: packet.source_port.to_string(), + source_channel: packet.source_channel.to_string(), + destination_port: packet.destination_port.to_string(), + destination_channel: packet.destination_channel.to_string(), + data: packet.data, + timeout_height: Some(packet.timeout_height.into()), + timeout_timestamp: packet.timeout_timestamp.nanoseconds(), + } + } } #[cfg(test)] pub mod test_utils { - use crate::prelude::*; - use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; - use ibc_proto::ibc::core::client::v1::Height as RawHeight; - - use crate::core::ics24_host::identifier::{ChannelId, PortId}; - - /// Returns a dummy `RawPacket`, for testing only! 
- pub fn get_dummy_raw_packet(timeout_height: u64, timeout_timestamp: u64) -> RawPacket { - RawPacket { - sequence: 1, - source_port: PortId::default().to_string(), - source_channel: ChannelId::default().to_string(), - destination_port: PortId::default().to_string(), - destination_channel: ChannelId::default().to_string(), - data: vec![0], - timeout_height: Some(RawHeight { - revision_number: 0, - revision_height: timeout_height, - }), - timeout_timestamp, - } - } + use crate::prelude::*; + use ibc_proto::ibc::core::{channel::v1::Packet as RawPacket, client::v1::Height as RawHeight}; + + use crate::core::ics24_host::identifier::{ChannelId, PortId}; + + /// Returns a dummy `RawPacket`, for testing only! + pub fn get_dummy_raw_packet(timeout_height: u64, timeout_timestamp: u64) -> RawPacket { + RawPacket { + sequence: 1, + source_port: PortId::default().to_string(), + source_channel: ChannelId::default().to_string(), + destination_port: PortId::default().to_string(), + destination_channel: ChannelId::default().to_string(), + data: vec![0], + timeout_height: Some(RawHeight { revision_number: 0, revision_height: timeout_height }), + timeout_timestamp, + } + } } #[cfg(test)] mod tests { - use crate::prelude::*; + use crate::prelude::*; - use test_log::test; + use test_log::test; - use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; + use ibc_proto::ibc::core::channel::v1::Packet as RawPacket; - use crate::core::ics04_channel::packet::test_utils::get_dummy_raw_packet; - use crate::core::ics04_channel::packet::Packet; + use crate::core::ics04_channel::packet::{test_utils::get_dummy_raw_packet, Packet}; - #[test] - fn packet_try_from_raw() { - struct Test { - name: String, - raw: RawPacket, - want_pass: bool, - } + #[test] + fn packet_try_from_raw() { + struct Test { + name: String, + raw: RawPacket, + want_pass: bool, + } - let proof_height = 10; - let default_raw_msg = get_dummy_raw_packet(proof_height, 0); + let proof_height = 10; + let default_raw_msg = 
get_dummy_raw_packet(proof_height, 0); - let tests: Vec = vec![ + let tests: Vec = vec![ Test { name: "Good parameters".to_string(), raw: default_raw_msg.clone(), @@ -443,27 +464,35 @@ mod tests { }, ]; - for test in tests { - let res_msg = Packet::try_from(test.raw.clone()); - - assert_eq!( - test.want_pass, - res_msg.is_ok(), - "Packet::try_from failed for test {}, \nraw packet {:?} with error {:?}", - test.name, - test.raw, - res_msg.err(), - ); - } - } - - #[test] - fn to_and_from() { - let raw = get_dummy_raw_packet(15, 0); - let msg = Packet::try_from(raw.clone()).unwrap(); - let raw_back = RawPacket::from(msg.clone()); - let msg_back = Packet::try_from(raw_back.clone()).unwrap(); - assert_eq!(raw, raw_back); - assert_eq!(msg, msg_back); - } + for test in tests { + let res_msg = Packet::try_from(test.raw.clone()); + + assert_eq!( + test.want_pass, + res_msg.is_ok(), + "Packet::try_from failed for test {}, \nraw packet {:?} with error {:?}", + test.name, + test.raw, + res_msg.err(), + ); + } + } + + #[test] + fn to_and_from() { + let raw = get_dummy_raw_packet(15, 0); + let msg = Packet::try_from(raw.clone()).unwrap(); + let raw_back = RawPacket::from(msg.clone()); + let msg_back = Packet::try_from(raw_back.clone()).unwrap(); + assert_eq!(raw, raw_back); + assert_eq!(msg, msg_back); + } + + #[test] + fn serialize_and_deserialize_packet() { + let packet = Packet { data: vec![5; 32], ..Default::default() }; + let json = serde_json::to_string(&packet).unwrap(); + let deserialized_packet: Packet = serde_json::from_str(&json).unwrap(); + assert_eq!(packet, deserialized_packet); + } } diff --git a/modules/src/core/ics04_channel/version.rs b/modules/src/core/ics04_channel/version.rs index b0f7918dd0..e5e102b260 100644 --- a/modules/src/core/ics04_channel/version.rs +++ b/modules/src/core/ics04_channel/version.rs @@ -1,14 +1,10 @@ //! Data type definition and utilities for the //! version field of a channel end. -//! 
-use core::convert::Infallible; -use core::fmt; -use core::str::FromStr; +use core::{convert::Infallible, fmt, str::FromStr}; use serde_derive::{Deserialize, Serialize}; -use crate::applications::transfer; -use crate::prelude::*; +use crate::{applications::transfer, prelude::*}; /// The version field for a `ChannelEnd`. /// @@ -19,42 +15,42 @@ use crate::prelude::*; pub struct Version(String); impl Version { - pub fn new(v: String) -> Self { - Self(v) - } + pub fn new(v: String) -> Self { + Self(v) + } - pub fn ics20() -> Self { - Self::new(transfer::VERSION.to_string()) - } + pub fn ics20() -> Self { + Self::new(transfer::VERSION.to_string()) + } - pub fn empty() -> Self { - Self::new("".to_string()) - } + pub fn empty() -> Self { + Self::new("".to_string()) + } } impl From for Version { - fn from(s: String) -> Self { - Self::new(s) - } + fn from(s: String) -> Self { + Self::new(s) + } } impl FromStr for Version { - type Err = Infallible; + type Err = Infallible; - fn from_str(s: &str) -> Result { - Ok(Self::new(s.to_string())) - } + fn from_str(s: &str) -> Result { + Ok(Self::new(s.to_string())) + } } /// The default version is empty (unspecified). 
impl Default for Version { - fn default() -> Self { - Version::empty() - } + fn default() -> Self { + Version::empty() + } } impl fmt::Display for Version { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } } diff --git a/modules/src/core/ics05_port/context.rs b/modules/src/core/ics05_port/context.rs index 2c59d4a592..ce07d1911a 100644 --- a/modules/src/core/ics05_port/context.rs +++ b/modules/src/core/ics05_port/context.rs @@ -1,10 +1,13 @@ -use crate::core::ics05_port::error::Error; -use crate::core::ics24_host::identifier::PortId; -use crate::core::ics26_routing::context::ModuleId; -use crate::prelude::*; +use crate::{ + core::{ + ics05_port::error::Error, ics24_host::identifier::PortId, ics26_routing::context::ModuleId, + }, + prelude::*, +}; -/// A context supplying all the necessary read-only dependencies for processing any information regarding a port. +/// A context supplying all the necessary read-only dependencies for processing any information +/// regarding a port. pub trait PortReader { - /// Return the module_id associated with a given port_id - fn lookup_module_by_port(&self, port_id: &PortId) -> Result; + /// Return the module_id associated with a given port_id + fn lookup_module_by_port(&self, port_id: &PortId) -> Result; } diff --git a/modules/src/core/ics05_port/error.rs b/modules/src/core/ics05_port/error.rs index 4c49d3d800..9f2a299501 100644 --- a/modules/src/core/ics05_port/error.rs +++ b/modules/src/core/ics05_port/error.rs @@ -1,22 +1,23 @@ -use crate::core::ics24_host::identifier::PortId; +use crate::{core::ics24_host::identifier::PortId, prelude::*}; use flex_error::define_error; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - UnknownPort - { port_id: PortId } - | e | { format_args!("port '{0}' is unknown", e.port_id) }, + #[derive(Debug, PartialEq, Eq, derive_more::From)] + Error { + UnknownPort + { port_id: PortId } + | e | { format_args!("port '{0}' is unknown", e.port_id) }, - PortAlreadyBound - { port_id: PortId } - | e | { format_args!("port '{0}' is already bound", e.port_id) }, + PortAlreadyBound + { port_id: PortId } + | e | { format_args!("port '{0}' is already bound", e.port_id) }, - ModuleNotFound - { port_id: PortId } - | e | { format_args!("could not retrieve module from port '{0}'", e.port_id) }, + ModuleNotFound + { port_id: PortId } + | e | { format_args!("could not retrieve module from port '{0}'", e.port_id) }, - ImplementationSpecific - | _ | { "implementation specific error" }, - } + ImplementationSpecific + { reason: String } + | e | { format_args!("implementation specific error: {}", e.reason) }, + } } diff --git a/modules/src/core/ics23_commitment/commitment.rs b/modules/src/core/ics23_commitment/commitment.rs index fe77cb854e..2b8fd4c519 100644 --- a/modules/src/core/ics23_commitment/commitment.rs +++ b/modules/src/core/ics23_commitment/commitment.rs @@ -1,6 +1,4 @@ -use crate::core::ics23_commitment::error::Error; -use crate::prelude::*; -use crate::proofs::ProofError; +use crate::{core::ics23_commitment::error::Error, prelude::*, proofs::ProofError}; use core::{convert::TryFrom, fmt}; use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; @@ -12,37 +10,35 @@ use super::merkle::MerkleProof; #[derive(Clone, PartialEq, Eq, Serialize)] #[serde(transparent)] pub struct CommitmentRoot { - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - bytes: Vec, + #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + bytes: Vec, } impl fmt::Debug for CommitmentRoot { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hex = 
Hex::upper_case().encode_to_string(&self.bytes).unwrap(); - f.debug_tuple("CommitmentRoot").field(&hex).finish() - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); + f.debug_tuple("CommitmentRoot").field(&hex).finish() + } } impl CommitmentRoot { - pub fn from_bytes(bytes: &[u8]) -> Self { - Self { - bytes: Vec::from(bytes), - } - } + pub fn from_bytes(bytes: &[u8]) -> Self { + Self { bytes: Vec::from(bytes) } + } - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } - pub fn into_vec(self) -> Vec { - self.bytes - } + pub fn into_vec(self) -> Vec { + self.bytes + } } impl From> for CommitmentRoot { - fn from(bytes: Vec) -> Self { - Self { bytes } - } + fn from(bytes: Vec) -> Self { + Self { bytes } + } } #[derive(Clone, Debug, PartialEq)] @@ -51,120 +47,128 @@ pub struct CommitmentPath; #[derive(Clone, PartialEq, Eq, Serialize)] #[serde(transparent)] pub struct CommitmentProofBytes { - #[serde(serialize_with = "crate::serializers::ser_hex_upper")] - bytes: Vec, + #[serde(serialize_with = "crate::serializers::ser_hex_upper")] + bytes: Vec, +} + +impl CommitmentProofBytes { + /// Get proof bytes + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } } impl fmt::Debug for CommitmentProofBytes { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); - f.debug_tuple("CommitmentProof").field(&hex).finish() - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let hex = Hex::upper_case().encode_to_string(&self.bytes).unwrap(); + f.debug_tuple("CommitmentProof").field(&hex).finish() + } } impl TryFrom> for CommitmentProofBytes { - type Error = ProofError; + type Error = ProofError; - fn try_from(bytes: Vec) -> Result { - if bytes.is_empty() { - Err(Self::Error::empty_proof()) - } else { - Ok(Self { bytes }) - } - } + fn try_from(bytes: Vec) -> Result 
{ + if bytes.is_empty() { + Err(Self::Error::empty_proof()) + } else { + Ok(Self { bytes }) + } + } } impl From for Vec { - fn from(p: CommitmentProofBytes) -> Vec { - p.bytes - } + fn from(p: CommitmentProofBytes) -> Vec { + p.bytes + } } impl TryFrom for CommitmentProofBytes { - type Error = ProofError; + type Error = ProofError; - fn try_from(proof: RawMerkleProof) -> Result { - let mut buf = Vec::new(); - prost::Message::encode(&proof, &mut buf).unwrap(); - buf.try_into() - } + fn try_from(proof: RawMerkleProof) -> Result { + let mut buf = Vec::new(); + prost::Message::encode(&proof, &mut buf).unwrap(); + buf.try_into() + } } -impl TryFrom for CommitmentProofBytes { - type Error = ProofError; +impl TryFrom> for CommitmentProofBytes { + type Error = ProofError; - fn try_from(value: MerkleProof) -> Result { - Self::try_from(RawMerkleProof::from(value)) - } + fn try_from(value: MerkleProof) -> Result { + Self::try_from(RawMerkleProof::from(value)) + } } impl TryFrom for RawMerkleProof { - type Error = Error; + type Error = Error; - fn try_from(value: CommitmentProofBytes) -> Result { - let value: Vec = value.into(); - let res: RawMerkleProof = - prost::Message::decode(value.as_ref()).map_err(Error::invalid_raw_merkle_proof)?; - Ok(res) - } + fn try_from(value: CommitmentProofBytes) -> Result { + let value: Vec = value.into(); + let res: RawMerkleProof = + prost::Message::decode(value.as_ref()).map_err(Error::invalid_raw_merkle_proof)?; + Ok(res) + } } #[derive(Clone, PartialEq, Eq, Hash, Deserialize, Default)] pub struct CommitmentPrefix { - bytes: Vec, + bytes: Vec, } impl CommitmentPrefix { - pub fn as_bytes(&self) -> &[u8] { - &self.bytes - } + pub fn as_bytes(&self) -> &[u8] { + &self.bytes + } - pub fn into_vec(self) -> Vec { - self.bytes - } + pub fn into_vec(self) -> Vec { + self.bytes + } } impl TryFrom> for CommitmentPrefix { - type Error = Error; + type Error = Error; - fn try_from(bytes: Vec) -> Result { - if bytes.is_empty() { - 
Err(Self::Error::empty_commitment_prefix()) - } else { - Ok(Self { bytes }) - } - } + fn try_from(bytes: Vec) -> Result { + if bytes.is_empty() { + Err(Self::Error::empty_commitment_prefix()) + } else { + Ok(Self { bytes }) + } + } } impl fmt::Debug for CommitmentPrefix { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let converted = core::str::from_utf8(self.as_bytes()); - match converted { - Ok(s) => write!(f, "{}", s), - Err(_e) => write!(f, "", self.as_bytes()), - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let converted = core::str::from_utf8(self.as_bytes()); + match converted { + Ok(s) => write!(f, "{}", s), + Err(_e) => write!(f, "", self.as_bytes()), + } + } } impl Serialize for CommitmentPrefix { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - format!("{:?}", self).serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + format!("{:?}", self).serialize(serializer) + } } #[cfg(test)] pub mod test_util { - use crate::prelude::*; - use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; - use ibc_proto::ics23::CommitmentProof; - - /// Returns a dummy `RawMerkleProof`, for testing only! - pub fn get_dummy_merkle_proof() -> RawMerkleProof { - let parsed = CommitmentProof { proof: None }; - let mproofs: Vec = vec![parsed]; - RawMerkleProof { proofs: mproofs } - } + use crate::prelude::*; + use ibc_proto::{ + ibc::core::commitment::v1::MerkleProof as RawMerkleProof, ics23::CommitmentProof, + }; + + /// Returns a dummy `RawMerkleProof`, for testing only! 
+ pub fn get_dummy_merkle_proof() -> RawMerkleProof { + let parsed = CommitmentProof { proof: None }; + let mproofs: Vec = vec![parsed]; + RawMerkleProof { proofs: mproofs } + } } diff --git a/modules/src/core/ics23_commitment/error.rs b/modules/src/core/ics23_commitment/error.rs index 260c9557b4..3bc64d17b7 100644 --- a/modules/src/core/ics23_commitment/error.rs +++ b/modules/src/core/ics23_commitment/error.rs @@ -2,38 +2,38 @@ use flex_error::{define_error, TraceError}; use prost::DecodeError; define_error! { - #[derive(Debug, PartialEq, Eq)] - Error { - InvalidRawMerkleProof - [ TraceError ] - |_| { "invalid raw merkle proof" }, + #[derive(Debug, PartialEq, Eq)] + Error { + InvalidRawMerkleProof + [ TraceError ] + |_| { "invalid raw merkle proof" }, - CommitmentProofDecodingFailed - [ TraceError ] - |_| { "failed to decode commitment proof" }, + CommitmentProofDecodingFailed + [ TraceError ] + |_| { "failed to decode commitment proof" }, - EmptyCommitmentPrefix - |_| { "empty commitment prefix" }, + EmptyCommitmentPrefix + |_| { "empty commitment prefix" }, - EmptyMerkleProof - |_| { "empty merkle proof" }, + EmptyMerkleProof + |_| { "empty merkle proof" }, - EmptyMerkleRoot - |_| { "empty merkle root" }, + EmptyMerkleRoot + |_| { "empty merkle root" }, - EmptyVerifiedValue - |_| { "empty verified value" }, + EmptyVerifiedValue + |_| { "empty verified value" }, - NumberOfSpecsMismatch - |_| { "mismatch between the number of proofs with that of specs" }, + NumberOfSpecsMismatch + |_| { "mismatch between the number of proofs with that of specs" }, - NumberOfKeysMismatch - |_| { "mismatch between the number of proofs with that of keys" }, + NumberOfKeysMismatch + |_| { "mismatch between the number of proofs with that of keys" }, - InvalidMerkleProof - |_| { "invalid merkle proof" }, + InvalidMerkleProof + |_| { "invalid merkle proof" }, - VerificationFailure - |_| { "proof verification failed" } - } + VerificationFailure + |_| { "proof verification failed" } + } } 
diff --git a/modules/src/core/ics23_commitment/merkle.rs b/modules/src/core/ics23_commitment/merkle.rs index fcad180f62..7a74281823 100644 --- a/modules/src/core/ics23_commitment/merkle.rs +++ b/modules/src/core/ics23_commitment/merkle.rs @@ -1,183 +1,180 @@ -use crate::prelude::*; -use tendermint::merkle::proof::Proof as TendermintProof; - -use ibc_proto::ibc::core::commitment::v1::MerklePath; -use ibc_proto::ibc::core::commitment::v1::MerkleProof as RawMerkleProof; -use ibc_proto::ibc::core::commitment::v1::MerkleRoot; -use ics23::commitment_proof::Proof; +use crate::{ + core::ics23_commitment::{ + commitment::{CommitmentPrefix, CommitmentRoot}, + error::Error, + specs::ProofSpecs, + }, + prelude::*, +}; +use core::marker::PhantomData; +use ibc_proto::ibc::core::commitment::v1::{MerklePath, MerkleProof as RawMerkleProof, MerkleRoot}; use ics23::{ - calculate_existence_root, verify_membership, verify_non_membership, CommitmentProof, - NonExistenceProof, + calculate_existence_root, commitment_proof::Proof, verify_membership, verify_non_membership, + CommitmentProof, HostFunctionsProvider, NonExistenceProof, }; -use crate::core::ics23_commitment::commitment::{CommitmentPrefix, CommitmentRoot}; -use crate::core::ics23_commitment::error::Error; -use crate::core::ics23_commitment::specs::ProofSpecs; - pub fn apply_prefix(prefix: &CommitmentPrefix, mut path: Vec) -> MerklePath { - let mut key_path: Vec = vec![format!("{:?}", prefix)]; - key_path.append(&mut path); - MerklePath { key_path } + let mut key_path: Vec = vec![format!("{:?}", prefix)]; + key_path.append(&mut path); + MerklePath { key_path } } impl From for MerkleRoot { - fn from(root: CommitmentRoot) -> Self { - Self { - hash: root.into_vec(), - } - } + fn from(root: CommitmentRoot) -> Self { + Self { hash: root.into_vec() } + } } #[derive(Clone, Debug, PartialEq)] -pub struct MerkleProof { - pub proofs: Vec, +pub struct MerkleProof { + pub proofs: Vec, + _phantom: PhantomData, } /// Convert to 
ics23::CommitmentProof -/// The encoding and decoding shouldn't fail since ics23::CommitmentProof and ibc_proto::ics23::CommitmentProof should be the same -/// Ref. -impl From for MerkleProof { - fn from(proof: RawMerkleProof) -> Self { - let proofs: Vec = proof - .proofs - .into_iter() - .map(|p| { - let mut encoded = Vec::new(); - prost::Message::encode(&p, &mut encoded).unwrap(); - prost::Message::decode(&*encoded).unwrap() - }) - .collect(); - Self { proofs } - } +/// The encoding and decoding shouldn't fail since ics23::CommitmentProof and +/// ibc_proto::ics23::CommitmentProof should be the same Ref. +impl From for MerkleProof { + fn from(proof: RawMerkleProof) -> Self { + let proofs: Vec = proof + .proofs + .into_iter() + .map(|p| { + let mut encoded = Vec::new(); + prost::Message::encode(&p, &mut encoded).unwrap(); + prost::Message::decode(&*encoded).unwrap() + }) + .collect(); + Self { proofs, _phantom: PhantomData } + } } -impl From for RawMerkleProof { - fn from(proof: MerkleProof) -> Self { - Self { - proofs: proof - .proofs - .into_iter() - .map(|p| { - let mut encoded = Vec::new(); - prost::Message::encode(&p, &mut encoded).unwrap(); - prost::Message::decode(&*encoded).unwrap() - }) - .collect(), - } - } +impl From> for RawMerkleProof { + fn from(proof: MerkleProof) -> Self { + Self { + proofs: proof + .proofs + .into_iter() + .map(|p| { + let mut encoded = Vec::new(); + prost::Message::encode(&p, &mut encoded).unwrap(); + prost::Message::decode(&*encoded).unwrap() + }) + .collect(), + } + } } -impl MerkleProof { - pub fn verify_membership( - &self, - specs: &ProofSpecs, - root: MerkleRoot, - keys: MerklePath, - value: Vec, - start_index: usize, - ) -> Result<(), Error> { - // validate arguments - if self.proofs.is_empty() { - return Err(Error::empty_merkle_proof()); - } - if root.hash.is_empty() { - return Err(Error::empty_merkle_root()); - } - let num = self.proofs.len(); - let ics23_specs = Vec::::from(specs.clone()); - if ics23_specs.len() != num 
{ - return Err(Error::number_of_specs_mismatch()); - } - if keys.key_path.len() != num { - return Err(Error::number_of_keys_mismatch()); - } - if value.is_empty() { - return Err(Error::empty_verified_value()); - } - - let mut subroot = value.clone(); - let mut value = value; - // keys are represented from root-to-leaf - for ((proof, spec), key) in self - .proofs - .iter() - .zip(ics23_specs.iter()) - .zip(keys.key_path.iter().rev()) - .skip(start_index) - { - match &proof.proof { - Some(Proof::Exist(existence_proof)) => { - subroot = calculate_existence_root(existence_proof) - .map_err(|_| Error::invalid_merkle_proof())?; - if !verify_membership(proof, spec, &subroot, key.as_bytes(), &value) { - return Err(Error::verification_failure()); - } - value = subroot.clone(); - } - _ => return Err(Error::invalid_merkle_proof()), - } - } - - if root.hash != subroot { - return Err(Error::verification_failure()); - } - - Ok(()) - } - - pub fn verify_non_membership( - &self, - specs: &ProofSpecs, - root: MerkleRoot, - keys: MerklePath, - ) -> Result<(), Error> { - // validate arguments - if self.proofs.is_empty() { - return Err(Error::empty_merkle_proof()); - } - if root.hash.is_empty() { - return Err(Error::empty_merkle_root()); - } - let num = self.proofs.len(); - let ics23_specs = Vec::::from(specs.clone()); - if ics23_specs.len() != num { - return Err(Error::number_of_specs_mismatch()); - } - if keys.key_path.len() != num { - return Err(Error::number_of_keys_mismatch()); - } - - // verify the absence of key in lowest subtree - let proof = self.proofs.get(0).ok_or_else(Error::invalid_merkle_proof)?; - let spec = ics23_specs.get(0).ok_or_else(Error::invalid_merkle_proof)?; - // keys are represented from root-to-leaf - let key = keys - .key_path - .get(num - 1) - .ok_or_else(Error::invalid_merkle_proof)?; - match &proof.proof { - Some(Proof::Nonexist(non_existence_proof)) => { - let subroot = calculate_non_existence_root(non_existence_proof)?; - if 
!verify_non_membership(proof, spec, &subroot, key.as_bytes()) { - return Err(Error::verification_failure()); - } - // verify membership proofs starting from index 1 with value = subroot - self.verify_membership(specs, root, keys, subroot, 1) - } - _ => Err(Error::invalid_merkle_proof()), - } - } +impl MerkleProof { + pub fn verify_membership( + &self, + specs: &ProofSpecs, + root: MerkleRoot, + keys: MerklePath, + value: Vec, + start_index: usize, + ) -> Result<(), Error> { + // validate arguments + if self.proofs.is_empty() { + return Err(Error::empty_merkle_proof()) + } + if root.hash.is_empty() { + return Err(Error::empty_merkle_root()) + } + let num = self.proofs.len(); + let ics23_specs = Vec::::from(specs.clone()); + if ics23_specs.len() != num { + return Err(Error::number_of_specs_mismatch()) + } + if keys.key_path.len() != num { + return Err(Error::number_of_keys_mismatch()) + } + if value.is_empty() { + return Err(Error::empty_verified_value()) + } + + let mut subroot = value.clone(); + let mut value = value; + // keys are represented from root-to-leaf + for ((proof, spec), key) in self + .proofs + .iter() + .zip(ics23_specs.iter()) + .zip(keys.key_path.iter().rev()) + .skip(start_index) + { + match &proof.proof { + Some(Proof::Exist(existence_proof)) => { + subroot = calculate_existence_root::(existence_proof) + .map_err(|_| Error::invalid_merkle_proof())?; + if !verify_membership::(proof, spec, &subroot, key.as_bytes(), &value) { + return Err(Error::verification_failure()) + } + value = subroot.clone(); + }, + _ => return Err(Error::invalid_merkle_proof()), + } + } + + if root.hash != subroot { + return Err(Error::verification_failure()) + } + + Ok(()) + } + + pub fn verify_non_membership( + &self, + specs: &ProofSpecs, + root: MerkleRoot, + keys: MerklePath, + ) -> Result<(), Error> { + // validate arguments + if self.proofs.is_empty() { + return Err(Error::empty_merkle_proof()) + } + if root.hash.is_empty() { + return Err(Error::empty_merkle_root()) + 
} + let num = self.proofs.len(); + let ics23_specs = Vec::::from(specs.clone()); + if ics23_specs.len() != num { + return Err(Error::number_of_specs_mismatch()) + } + if keys.key_path.len() != num { + return Err(Error::number_of_keys_mismatch()) + } + + // verify the absence of key in lowest subtree + let proof = self.proofs.get(0).ok_or_else(Error::invalid_merkle_proof)?; + let spec = ics23_specs.get(0).ok_or_else(Error::invalid_merkle_proof)?; + // keys are represented from root-to-leaf + let key = keys.key_path.get(num - 1).ok_or_else(Error::invalid_merkle_proof)?; + match &proof.proof { + Some(Proof::Nonexist(non_existence_proof)) => { + let subroot = calculate_non_existence_root::(non_existence_proof)?; + if !verify_non_membership::(proof, spec, &subroot, key.as_bytes()) { + return Err(Error::verification_failure()) + } + // verify membership proofs starting from index 1 with value = subroot + self.verify_membership(specs, root, keys, subroot, 1) + }, + _ => Err(Error::invalid_merkle_proof()), + } + } } // TODO move to ics23 -fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Error> { - if let Some(left) = &proof.left { - calculate_existence_root(left).map_err(|_| Error::invalid_merkle_proof()) - } else if let Some(right) = &proof.right { - calculate_existence_root(right).map_err(|_| Error::invalid_merkle_proof()) - } else { - Err(Error::invalid_merkle_proof()) - } +fn calculate_non_existence_root( + proof: &NonExistenceProof, +) -> Result, Error> { + if let Some(left) = &proof.left { + calculate_existence_root::(left).map_err(|_| Error::invalid_merkle_proof()) + } else if let Some(right) = &proof.right { + calculate_existence_root::(right).map_err(|_| Error::invalid_merkle_proof()) + } else { + Err(Error::invalid_merkle_proof()) + } } // Merkle Proof serialization notes: @@ -185,17 +182,14 @@ fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Er // - TmProof: in tendermint-rs/src/merkle/proof.rs:Proof // - RawProofOps: in 
tendermint-proto/tendermint.cyrpto.rs:ProofOps // - RawMerkleProof: in ibc-proto/ibc.core.commitment.v1.rs:MerkleProof -// - structure that includes a RawProofOps in its only `proof` field. -// #[derive(Clone, PartialEq, ::prost::Message)] -// pub struct MerkleProof { -// #[prost(message, optional, tag="1")] -// pub proof: ::core::option::Option<::tendermint_proto::crypto::ProofOps>, -// } -// - Vec: RawMerkleProof is not explicitly used but, serialized as Vec, it is -// included in all handshake messages that require proofs (i.e. all except the two `OpenInit`), -// and also in all queries that require proofs -// - MerkleProof: Domain type for RawMerkleProof, currently not used and identical to RawMerkleProof. -// This will change with verification implementation. +// - structure that includes a RawProofOps in its only `proof` field. #[derive(Clone, PartialEq, +// ::prost::Message)] pub struct MerkleProof { #[prost(message, optional, tag="1")] pub proof: +// ::core::option::Option<::tendermint_proto::crypto::ProofOps>, } +// - Vec: RawMerkleProof is not explicitly used but, serialized as Vec, it is included in +// all handshake messages that require proofs (i.e. all except the two `OpenInit`), and also in +// all queries that require proofs +// - MerkleProof: Domain type for RawMerkleProof, currently not used and identical to +// RawMerkleProof. This will change with verification implementation. 
// - CommitmentProof: Defined in ibc-rs as Vec and currently used in all its messages // // Here are a couple of flows that illustrate the different conversions: @@ -209,15 +203,11 @@ fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Er // TmProof -> RawProofOps => RawMerkleProof -> CommitmentProof -> Vec // // Implementations of (de)serializers and conversions: -// - commitment.rs: -// Vec <-> CommitmentProof -// CommitmentProof <-> RawMerkleProof -// - merkle.rs: -// RawMerkleProof <-> MerkleProof -// - tendermint-rs/src/merkle/proof.rs: -// TmProof <-> RawProofOps -// - cosmos.rs:abci_query() converts from query proof to Merkle proof: -// RawProofOps => RawMerkleProof +// - commitment.rs: Vec <-> CommitmentProof CommitmentProof <-> RawMerkleProof +// - merkle.rs: RawMerkleProof <-> MerkleProof +// - tendermint-rs/src/merkle/proof.rs: TmProof <-> RawProofOps +// - cosmos.rs:abci_query() converts from query proof to Merkle proof: RawProofOps => +// RawMerkleProof // // impl TryFrom for MerkleProof { // type Error = Error; @@ -231,17 +221,3 @@ fn calculate_non_existence_root(proof: &NonExistenceProof) -> Result, Er // RawMerkleProof { proof: value.proof } // } // } - -pub fn convert_tm_to_ics_merkle_proof(tm_proof: &TendermintProof) -> Result { - let mut proofs = Vec::new(); - - for op in &tm_proof.ops { - let mut parsed = ibc_proto::ics23::CommitmentProof { proof: None }; - prost::Message::merge(&mut parsed, op.data.as_slice()) - .map_err(Error::commitment_proof_decoding_failed)?; - - proofs.push(parsed); - } - - Ok(MerkleProof::from(RawMerkleProof { proofs })) -} diff --git a/modules/src/core/ics23_commitment/specs.rs b/modules/src/core/ics23_commitment/specs.rs index 53e45a5912..72977501a4 100644 --- a/modules/src/core/ics23_commitment/specs.rs +++ b/modules/src/core/ics23_commitment/specs.rs @@ -14,135 +14,130 @@ use serde::{Deserialize, Serialize}; pub struct ProofSpecs(Vec); impl ProofSpecs { - /// Returns the specification for Cosmos-SDK 
proofs - pub fn cosmos() -> Self { - vec![ - ics23::iavl_spec(), // Format of proofs-iavl (iavl merkle proofs) - ics23::tendermint_spec(), // Format of proofs-tendermint (crypto/ merkle SimpleProof) - ] - .into() - } - - pub fn is_empty(&self) -> bool { - self.0.is_empty() - } + /// Returns the specification for Cosmos-SDK proofs + pub fn cosmos() -> Self { + vec![ + ics23::iavl_spec(), // Format of proofs-iavl (iavl merkle proofs) + ics23::tendermint_spec(), // Format of proofs-tendermint (crypto/ merkle SimpleProof) + ] + .into() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } } impl Default for ProofSpecs { - fn default() -> Self { - Self::cosmos() - } + fn default() -> Self { + Self::cosmos() + } } impl From> for ProofSpecs { - fn from(ibc_specs: Vec) -> Self { - Self(ibc_specs.into_iter().map(ProofSpec).collect()) - } + fn from(ibc_specs: Vec) -> Self { + Self(ibc_specs.into_iter().map(ProofSpec).collect()) + } } impl From> for ProofSpecs { - fn from(ics23_specs: Vec) -> Self { - Self( - ics23_specs - .into_iter() - .map(|ics23_spec| ics23_spec.into()) - .collect(), - ) - } + fn from(ics23_specs: Vec) -> Self { + Self(ics23_specs.into_iter().map(|ics23_spec| ics23_spec.into()).collect()) + } } impl From for Vec { - fn from(specs: ProofSpecs) -> Self { - specs.0.into_iter().map(|spec| spec.into()).collect() - } + fn from(specs: ProofSpecs) -> Self { + specs.0.into_iter().map(|spec| spec.into()).collect() + } } impl From for Vec { - fn from(specs: ProofSpecs) -> Self { - specs.0.into_iter().map(|spec| spec.0).collect() - } + fn from(specs: ProofSpecs) -> Self { + specs.0.into_iter().map(|spec| spec.0).collect() + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] struct ProofSpec(IbcProofSpec); impl From for ProofSpec { - fn from(spec: Ics23ProofSpec) -> Self { - Self(IbcProofSpec { - leaf_spec: spec.leaf_spec.map(|lop| LeafOp::from(lop).0), - inner_spec: spec.inner_spec.map(|ispec| InnerSpec::from(ispec).0), - max_depth: 
spec.max_depth, - min_depth: spec.min_depth, - }) - } + fn from(spec: Ics23ProofSpec) -> Self { + Self(IbcProofSpec { + leaf_spec: spec.leaf_spec.map(|lop| LeafOp::from(lop).0), + inner_spec: spec.inner_spec.map(|ispec| InnerSpec::from(ispec).0), + max_depth: spec.max_depth, + min_depth: spec.min_depth, + }) + } } impl From for Ics23ProofSpec { - fn from(spec: ProofSpec) -> Self { - let spec = spec.0; - Ics23ProofSpec { - leaf_spec: spec.leaf_spec.map(|lop| LeafOp(lop).into()), - inner_spec: spec.inner_spec.map(|ispec| InnerSpec(ispec).into()), - max_depth: spec.max_depth, - min_depth: spec.min_depth, - } - } + fn from(spec: ProofSpec) -> Self { + let spec = spec.0; + Ics23ProofSpec { + leaf_spec: spec.leaf_spec.map(|lop| LeafOp(lop).into()), + inner_spec: spec.inner_spec.map(|ispec| InnerSpec(ispec).into()), + max_depth: spec.max_depth, + min_depth: spec.min_depth, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] struct LeafOp(IbcLeafOp); impl From for LeafOp { - fn from(leaf_op: Ics23LeafOp) -> Self { - Self(IbcLeafOp { - hash: leaf_op.hash, - prehash_key: leaf_op.prehash_key, - prehash_value: leaf_op.prehash_value, - length: leaf_op.length, - prefix: leaf_op.prefix, - }) - } + fn from(leaf_op: Ics23LeafOp) -> Self { + Self(IbcLeafOp { + hash: leaf_op.hash, + prehash_key: leaf_op.prehash_key, + prehash_value: leaf_op.prehash_value, + length: leaf_op.length, + prefix: leaf_op.prefix, + }) + } } impl From for Ics23LeafOp { - fn from(leaf_op: LeafOp) -> Self { - let leaf_op = leaf_op.0; - Ics23LeafOp { - hash: leaf_op.hash, - prehash_key: leaf_op.prehash_key, - prehash_value: leaf_op.prehash_value, - length: leaf_op.length, - prefix: leaf_op.prefix, - } - } + fn from(leaf_op: LeafOp) -> Self { + let leaf_op = leaf_op.0; + Ics23LeafOp { + hash: leaf_op.hash, + prehash_key: leaf_op.prehash_key, + prehash_value: leaf_op.prehash_value, + length: leaf_op.length, + prefix: leaf_op.prefix, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Deserialize, 
Serialize)] struct InnerSpec(IbcInnerSpec); impl From for InnerSpec { - fn from(inner_spec: Ics23InnerSpec) -> Self { - Self(IbcInnerSpec { - child_order: inner_spec.child_order, - child_size: inner_spec.child_size, - min_prefix_length: inner_spec.min_prefix_length, - max_prefix_length: inner_spec.max_prefix_length, - empty_child: inner_spec.empty_child, - hash: inner_spec.hash, - }) - } + fn from(inner_spec: Ics23InnerSpec) -> Self { + Self(IbcInnerSpec { + child_order: inner_spec.child_order, + child_size: inner_spec.child_size, + min_prefix_length: inner_spec.min_prefix_length, + max_prefix_length: inner_spec.max_prefix_length, + empty_child: inner_spec.empty_child, + hash: inner_spec.hash, + }) + } } impl From for Ics23InnerSpec { - fn from(inner_spec: InnerSpec) -> Self { - let inner_spec = inner_spec.0; - Ics23InnerSpec { - child_order: inner_spec.child_order, - child_size: inner_spec.child_size, - min_prefix_length: inner_spec.min_prefix_length, - max_prefix_length: inner_spec.max_prefix_length, - empty_child: inner_spec.empty_child, - hash: inner_spec.hash, - } - } + fn from(inner_spec: InnerSpec) -> Self { + let inner_spec = inner_spec.0; + Ics23InnerSpec { + child_order: inner_spec.child_order, + child_size: inner_spec.child_size, + min_prefix_length: inner_spec.min_prefix_length, + max_prefix_length: inner_spec.max_prefix_length, + empty_child: inner_spec.empty_child, + hash: inner_spec.hash, + } + } } diff --git a/modules/src/core/ics24_host/error.rs b/modules/src/core/ics24_host/error.rs index de71ad7974..5ef4b12aab 100644 --- a/modules/src/core/ics24_host/error.rs +++ b/modules/src/core/ics24_host/error.rs @@ -6,40 +6,40 @@ use serde::Serialize; use crate::prelude::*; define_error! 
{ - #[derive(Debug, PartialEq, Eq, Serialize)] - ValidationError { - ContainSeparator - { id : String } - | e | { format_args!("identifier {0} cannot contain separator '/'", e.id) }, - - InvalidLength - { - id: String, - length: usize, - min: usize, - max: usize, - } - | e | { format_args!("identifier {0} has invalid length {1} must be between {2}-{3} characters", e.id, e.length, e.min, e.max) }, - - InvalidCharacter - { id: String } - | e | { format_args!("identifier {0} must only contain alphanumeric characters or `.`, `_`, `+`, `-`, `#`, - `[`, `]`, `<`, `>`", e.id) }, - - Empty - | _ | { "identifier cannot be empty" }, - - ChainIdInvalidFormat - { id: String } - | e | { format_args!("chain identifiers are expected to be in epoch format {0}", e.id) }, - - ChannelIdInvalidFormat - | _ | { "channel identifiers are expected to be in `channel-{N}` format" }, - - ChannelIdParseFailure - [ TraceError ] - | _ | { "failed to parse channel identifier" }, - - InvalidCounterpartyChannelId - |_| { "Invalid channel id in counterparty" } - } + #[derive(Debug, PartialEq, Eq, Serialize)] + ValidationError { + ContainSeparator + { id : String } + | e | { format_args!("identifier {0} cannot contain separator '/'", e.id) }, + + InvalidLength + { + id: String, + length: usize, + min: usize, + max: usize, + } + | e | { format_args!("identifier {0} has invalid length {1} must be between {2}-{3} characters", e.id, e.length, e.min, e.max) }, + + InvalidCharacter + { id: String } + | e | { format_args!("identifier {0} must only contain alphanumeric characters or `.`, `_`, `+`, `-`, `#`, - `[`, `]`, `<`, `>`", e.id) }, + + Empty + | _ | { "identifier cannot be empty" }, + + ChainIdInvalidFormat + { id: String } + | e | { format_args!("chain identifiers are expected to be in epoch format {0}", e.id) }, + + ChannelIdInvalidFormat + | _ | { "channel identifiers are expected to be in `channel-{N}` format" }, + + ChannelIdParseFailure + [ TraceError ] + | _ | { "failed to parse channel 
identifier" }, + + InvalidCounterpartyChannelId + |_| { "Invalid channel id in counterparty" } + } } diff --git a/modules/src/core/ics24_host/identifier.rs b/modules/src/core/ics24_host/identifier.rs index 21f53b9363..7b01c3a90a 100644 --- a/modules/src/core/ics24_host/identifier.rs +++ b/modules/src/core/ics24_host/identifier.rs @@ -1,13 +1,14 @@ -use core::convert::{From, Infallible}; -use core::fmt::{self, Debug, Display, Formatter}; -use core::str::FromStr; +use core::{ + convert::{From, Infallible}, + fmt::{self, Debug, Display, Formatter}, + str::FromStr, +}; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use super::validate::*; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics24_host::error::ValidationError; -use crate::prelude::*; + +use crate::{core::ics24_host::error::ValidationError, prelude::*}; /// This type is subject to future changes. /// @@ -19,189 +20,163 @@ use crate::prelude::*; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(from = "tendermint::chain::Id", into = "tendermint::chain::Id")] pub struct ChainId { - id: String, - version: u64, + id: String, + version: u64, } impl ChainId { - /// Creates a new `ChainId` given a chain name and an epoch number. - /// - /// The returned `ChainId` will have the format: `{chain name}-{epoch number}`. - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// - /// let epoch_number = 10; - /// let id = ChainId::new("chainA".to_string(), epoch_number); - /// assert_eq!(id.version(), epoch_number); - /// ``` - pub fn new(name: String, version: u64) -> Self { - Self { - id: format!("{}-{}", name, version), - version, - } - } - - pub fn from_string(id: &str) -> Self { - let version = if Self::is_epoch_format(id) { - Self::chain_version(id) - } else { - 0 - }; - - Self { - id: id.to_string(), - version, - } - } - - /// Get a reference to the underlying string. 
- pub fn as_str(&self) -> &str { - &self.id - } - - // TODO: this should probably be named epoch_number. - /// Extract the version from this chain identifier. - pub fn version(&self) -> u64 { - self.version - } - - /// Extract the version from the given chain identifier. - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// - /// assert_eq!(ChainId::chain_version("chain--a-0"), 0); - /// assert_eq!(ChainId::chain_version("ibc-10"), 10); - /// assert_eq!(ChainId::chain_version("cosmos-hub-97"), 97); - /// assert_eq!(ChainId::chain_version("testnet-helloworld-2"), 2); - /// ``` - pub fn chain_version(chain_id: &str) -> u64 { - if !ChainId::is_epoch_format(chain_id) { - return 0; - } - - let split: Vec<_> = chain_id.split('-').collect(); - split - .last() - .expect("get revision number from chain_id") - .parse() - .unwrap_or(0) - } - - /// is_epoch_format() checks if a chain_id is in the format required for parsing epochs - /// The chainID must be in the form: `{chainID}-{version}` - /// ``` - /// use ibc::core::ics24_host::identifier::ChainId; - /// assert_eq!(ChainId::is_epoch_format("chainA-0"), false); - /// assert_eq!(ChainId::is_epoch_format("chainA"), false); - /// assert_eq!(ChainId::is_epoch_format("chainA-1"), true); - /// ``` - pub fn is_epoch_format(chain_id: &str) -> bool { - let re = safe_regex::regex!(br".+[^-]-{1}[1-9][0-9]*"); - re.is_match(chain_id.as_bytes()) - } + /// Creates a new `ChainId` given a chain name and an epoch number. + /// + /// The returned `ChainId` will have the format: `{chain name}-{epoch number}`. 
+ /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// + /// let epoch_number = 10; + /// let id = ChainId::new("chainA".to_string(), epoch_number); + /// assert_eq!(id.version(), epoch_number); + /// ``` + pub fn new(name: String, version: u64) -> Self { + Self { id: format!("{}-{}", name, version), version } + } + + pub fn from_string(id: &str) -> Self { + let version = if Self::is_epoch_format(id) { Self::chain_version(id) } else { 0 }; + + Self { id: id.to_string(), version } + } + + /// Get a reference to the underlying string. + pub fn as_str(&self) -> &str { + &self.id + } + + // TODO: this should probably be named epoch_number. + /// Extract the version from this chain identifier. + pub fn version(&self) -> u64 { + self.version + } + + /// Extract the version from the given chain identifier. + /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// + /// assert_eq!(ChainId::chain_version("chain--a-0"), 0); + /// assert_eq!(ChainId::chain_version("ibc-10"), 10); + /// assert_eq!(ChainId::chain_version("cosmos-hub-97"), 97); + /// assert_eq!(ChainId::chain_version("testnet-helloworld-2"), 2); + /// ``` + pub fn chain_version(chain_id: &str) -> u64 { + if !ChainId::is_epoch_format(chain_id) { + return 0 + } + + let split: Vec<_> = chain_id.split('-').collect(); + split.last().expect("get revision number from chain_id").parse().unwrap_or(0) + } + + /// is_epoch_format() checks if a chain_id is in the format required for parsing epochs + /// The chainID must be in the form: `{chainID}-{version}` + /// ``` + /// use ibc::core::ics24_host::identifier::ChainId; + /// assert_eq!(ChainId::is_epoch_format("chainA-0"), false); + /// assert_eq!(ChainId::is_epoch_format("chainA"), false); + /// assert_eq!(ChainId::is_epoch_format("chainA-1"), true); + /// ``` + pub fn is_epoch_format(chain_id: &str) -> bool { + let re = safe_regex::regex!(br".+[^-]-{1}[1-9][0-9]*"); + re.is_match(chain_id.as_bytes()) + } } impl FromStr for ChainId { - type Err 
= Infallible; + type Err = Infallible; - fn from_str(id: &str) -> Result { - Ok(Self::from_string(id)) - } + fn from_str(id: &str) -> Result { + Ok(Self::from_string(id)) + } } impl Display for ChainId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.id) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.id) + } } impl From for tendermint::chain::Id { - fn from(id: ChainId) -> Self { - tendermint::chain::Id::from_str(id.as_str()).unwrap() - } + fn from(id: ChainId) -> Self { + tendermint::chain::Id::from_str(id.as_str()).unwrap() + } } impl From for ChainId { - fn from(id: tendermint::chain::Id) -> Self { - ChainId::from_str(id.as_str()).unwrap() - } + fn from(id: tendermint::chain::Id) -> Self { + ChainId::from_str(id.as_str()).unwrap() + } } impl Default for ChainId { - fn default() -> Self { - "defaultChainId".to_string().parse().unwrap() - } + fn default() -> Self { + "defaultChainId".to_string().parse().unwrap() + } } impl From for ChainId { - fn from(value: String) -> Self { - Self::from_string(&value) - } + fn from(value: String) -> Self { + Self::from_string(&value) + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct ClientId(String); impl ClientId { - /// Builds a new client identifier. Client identifiers are deterministically formed from two - /// elements: a prefix derived from the client type `ctype`, and a monotonically increasing - /// `counter`; these are separated by a dash "-". 
-    ///
-    /// ```
-    /// # use ibc::core::ics24_host::identifier::ClientId;
-    /// # use ibc::core::ics02_client::client_type::ClientType;
-    /// let tm_client_id = ClientId::new(ClientType::Tendermint, 0);
-    /// assert!(tm_client_id.is_ok());
-    /// tm_client_id.map(|id| { assert_eq!(&id, "07-tendermint-0") });
-    /// ```
-    pub fn new(ctype: ClientType, counter: u64) -> Result<Self, ValidationError> {
-        let prefix = Self::prefix(ctype);
-        let id = format!("{}-{}", prefix, counter);
-        Self::from_str(id.as_str())
-    }
-
-    /// Get this identifier as a borrowed `&str`
-    pub fn as_str(&self) -> &str {
-        &self.0
-    }
-
-    /// Returns one of the prefixes that should be present in any client identifiers.
-    /// The prefix is deterministic for a given chain type, hence all clients for a Tendermint-type
-    /// chain, for example, will have the prefix '07-tendermint'.
-    pub fn prefix(client_type: ClientType) -> &'static str {
-        match client_type {
-            ClientType::Tendermint => ClientType::Tendermint.as_str(),
-
-            #[cfg(any(test, feature = "mocks"))]
-            ClientType::Mock => ClientType::Mock.as_str(),
-        }
-    }
-
-    /// Get this identifier as a borrowed byte slice
-    pub fn as_bytes(&self) -> &[u8] {
-        self.0.as_bytes()
-    }
+	/// Builds a new client identifier. Client identifiers are deterministically formed from two
+	/// elements: a prefix string `prefix`, and a monotonically increasing
+	/// `counter`; these are separated by a dash "-".
+	///
+	/// ```
+	/// # use ibc::core::ics24_host::identifier::ClientId;
+	///
+	/// let tm_client_id = ClientId::new("07-tendermint", 0);
+	/// assert!(tm_client_id.is_ok());
+	/// tm_client_id.map(|id| { assert_eq!(&id, "07-tendermint-0") });
+	/// ```
+	pub fn new(prefix: &str, counter: u64) -> Result<Self, ValidationError> {
+		let id = format!("{}-{}", prefix, counter);
+		Self::from_str(id.as_str())
+	}
+
+	/// Get this identifier as a borrowed `&str`
+	pub fn as_str(&self) -> &str {
+		&self.0
+	}
+
+	/// Get this identifier as a borrowed byte slice
+	pub fn as_bytes(&self) -> &[u8] {
+		self.0.as_bytes()
+	}
 }
 
 /// This implementation provides a `to_string` method.
 impl Display for ClientId {
-    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
-        write!(f, "{}", self.0)
-    }
+	fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> {
+		write!(f, "{}", self.0)
+	}
 }
 
 impl FromStr for ClientId {
-    type Err = ValidationError;
+	type Err = ValidationError;
 
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        validate_client_identifier(s).map(|_| Self(s.to_string()))
-    }
+	fn from_str(s: &str) -> Result<Self, Self::Err> {
+		validate_client_identifier(s).map(|_| Self(s.to_string()))
+	}
 }
 
+#[cfg(not(test))]
 impl Default for ClientId {
-    fn default() -> Self {
-        Self::new(ClientType::Tendermint, 0).unwrap()
-    }
+	fn default() -> Self {
+		Self::new("00-uninitialized", 0).unwrap()
+	}
 }
 
 /// Equality check against string literal (satisfies &ClientId == &str).
@@ -213,65 +188,65 @@ impl Default for ClientId {
 /// client_id.map(|id| {assert_eq!(&id, "clientidtwo")});
 /// ```
 impl PartialEq<str> for ClientId {
-    fn eq(&self, other: &str) -> bool {
-        self.as_str().eq(other)
-    }
+	fn eq(&self, other: &str) -> bool {
+		self.as_str().eq(other)
+	}
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
 pub struct ConnectionId(String);
 
 impl ConnectionId {
-    /// Builds a new connection identifier.
Connection identifiers are deterministically formed from - /// two elements: a prefix `prefix`, and a monotonically increasing `counter`; these are - /// separated by a dash "-". The prefix is currently determined statically (see - /// `ConnectionId::prefix()`) so this method accepts a single argument, the `counter`. - /// - /// ``` - /// # use ibc::core::ics24_host::identifier::ConnectionId; - /// let conn_id = ConnectionId::new(11); - /// assert_eq!(&conn_id, "connection-11"); - /// ``` - pub fn new(counter: u64) -> Self { - let id = format!("{}-{}", Self::prefix(), counter); - Self::from_str(id.as_str()).unwrap() - } - - /// Returns the static prefix to be used across all connection identifiers. - pub fn prefix() -> &'static str { - "connection" - } - - /// Get this identifier as a borrowed `&str` - pub fn as_str(&self) -> &str { - &self.0 - } - - /// Get this identifier as a borrowed byte slice - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + /// Builds a new connection identifier. Connection identifiers are deterministically formed from + /// two elements: a prefix `prefix`, and a monotonically increasing `counter`; these are + /// separated by a dash "-". The prefix is currently determined statically (see + /// `ConnectionId::prefix()`) so this method accepts a single argument, the `counter`. + /// + /// ``` + /// # use ibc::core::ics24_host::identifier::ConnectionId; + /// let conn_id = ConnectionId::new(11); + /// assert_eq!(&conn_id, "connection-11"); + /// ``` + pub fn new(counter: u64) -> Self { + let id = format!("{}-{}", Self::prefix(), counter); + Self::from_str(id.as_str()).unwrap() + } + + /// Returns the static prefix to be used across all connection identifiers. 
+ pub fn prefix() -> &'static str { + "connection" + } + + /// Get this identifier as a borrowed `&str` + pub fn as_str(&self) -> &str { + &self.0 + } + + /// Get this identifier as a borrowed byte slice + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } } /// This implementation provides a `to_string` method. impl Display for ConnectionId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.0) + } } impl FromStr for ConnectionId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - validate_connection_identifier(s).map(|_| Self(s.to_string())) - } + fn from_str(s: &str) -> Result { + validate_connection_identifier(s).map(|_| Self(s.to_string())) + } } impl Default for ConnectionId { - fn default() -> Self { - Self::new(0) - } + fn default() -> Self { + Self::new(0) + } } /// Equality check against string literal (satisfies &ConnectionId == &str). 
@@ -283,155 +258,150 @@ impl Default for ConnectionId { /// conn_id.map(|id| {assert_eq!(&id, "connectionId-0")}); /// ``` impl PartialEq for ConnectionId { - fn eq(&self, other: &str) -> bool { - self.as_str().eq(other) - } + fn eq(&self, other: &str) -> bool { + self.as_str().eq(other) + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct PortId(String); impl PortId { - /// Infallible creation of the well-known transfer port - pub fn transfer() -> Self { - Self("transfer".to_string()) - } + /// Infallible creation of the well-known transfer port + pub fn transfer() -> Self { + Self("transfer".to_string()) + } - /// Get this identifier as a borrowed `&str` - pub fn as_str(&self) -> &str { - &self.0 - } + /// Get this identifier as a borrowed `&str` + pub fn as_str(&self) -> &str { + &self.0 + } - /// Get this identifier as a borrowed byte slice - pub fn as_bytes(&self) -> &[u8] { - self.0.as_bytes() - } + /// Get this identifier as a borrowed byte slice + pub fn as_bytes(&self) -> &[u8] { + self.0.as_bytes() + } } /// This implementation provides a `to_string` method. 
impl Display for PortId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}", self.0) + } } impl FromStr for PortId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - validate_port_identifier(s).map(|_| Self(s.to_string())) - } + fn from_str(s: &str) -> Result { + validate_port_identifier(s).map(|_| Self(s.to_string())) + } } impl AsRef for PortId { - fn as_ref(&self) -> &str { - self.0.as_str() - } + fn as_ref(&self) -> &str { + self.0.as_str() + } } impl Default for PortId { - fn default() -> Self { - "defaultPort".to_string().parse().unwrap() - } + fn default() -> Self { + "defaultPort".to_string().parse().unwrap() + } } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ChannelId(u64); impl ChannelId { - /// Builds a new channel identifier. Like client and connection identifiers, channel ids are - /// deterministically formed from two elements: a prefix `prefix`, and a monotonically - /// increasing `counter`, separated by a dash "-". - /// The prefix is currently determined statically (see `ChannelId::prefix()`) so this method - /// accepts a single argument, the `counter`. - /// - /// ``` - /// # use ibc::core::ics24_host::identifier::ChannelId; - /// let chan_id = ChannelId::new(27); - /// assert_eq!(chan_id.to_string(), "channel-27"); - /// ``` - pub fn new(counter: u64) -> Self { - Self(counter) - } - - pub fn sequence(&self) -> u64 { - self.0 - } - - const fn prefix() -> &'static str { - "channel-" - } + /// Builds a new channel identifier. Like client and connection identifiers, channel ids are + /// deterministically formed from two elements: a prefix `prefix`, and a monotonically + /// increasing `counter`, separated by a dash "-". 
+ /// The prefix is currently determined statically (see `ChannelId::prefix()`) so this method + /// accepts a single argument, the `counter`. + /// + /// ``` + /// # use ibc::core::ics24_host::identifier::ChannelId; + /// let chan_id = ChannelId::new(27); + /// assert_eq!(chan_id.to_string(), "channel-27"); + /// ``` + pub fn new(counter: u64) -> Self { + Self(counter) + } + + pub fn sequence(&self) -> u64 { + self.0 + } + + const fn prefix() -> &'static str { + "channel-" + } } /// This implementation provides a `to_string` method. impl Display for ChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}{}", Self::prefix(), self.0) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}{}", Self::prefix(), self.0) + } } impl Debug for ChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - f.debug_tuple("ChannelId").field(&self.to_string()).finish() - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + f.debug_tuple("ChannelId").field(&self.to_string()).finish() + } } impl FromStr for ChannelId { - type Err = ValidationError; + type Err = ValidationError; - fn from_str(s: &str) -> Result { - let s = s - .strip_prefix(Self::prefix()) - .ok_or_else(ValidationError::channel_id_invalid_format)?; - let counter = u64::from_str(s).map_err(ValidationError::channel_id_parse_failure)?; - Ok(Self(counter)) - } + fn from_str(s: &str) -> Result { + let s = s + .strip_prefix(Self::prefix()) + .ok_or_else(ValidationError::channel_id_invalid_format)?; + let counter = u64::from_str(s).map_err(ValidationError::channel_id_parse_failure)?; + Ok(Self(counter)) + } } impl Default for ChannelId { - fn default() -> Self { - Self::new(0) - } + fn default() -> Self { + Self::new(0) + } } impl Serialize for ChannelId { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.collect_str(self) - } + fn serialize(&self, serializer: S) -> Result + 
where + S: Serializer, + { + serializer.collect_str(self) + } } impl<'de> Deserialize<'de> for ChannelId { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - String::deserialize(deserializer)? - .parse() - .map_err(de::Error::custom) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + String::deserialize(deserializer)?.parse().map_err(de::Error::custom) + } } /// A pair of [`PortId`] and [`ChannelId`] are used together for sending IBC packets. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] pub struct PortChannelId { - pub channel_id: ChannelId, - pub port_id: PortId, + pub channel_id: ChannelId, + pub port_id: PortId, } impl PortChannelId { - pub fn new(channel_id: ChannelId, port_id: PortId) -> Self { - Self { - channel_id, - port_id, - } - } + pub fn new(channel_id: ChannelId, port_id: PortId) -> Self { + Self { channel_id, port_id } + } } impl Display for PortChannelId { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - write!(f, "{}/{}", self.port_id, self.channel_id) - } + fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { + write!(f, "{}/{}", self.port_id, self.channel_id) + } } diff --git a/modules/src/core/ics24_host/path.rs b/modules/src/core/ics24_host/path.rs index 56542e1d26..b21a104fdc 100644 --- a/modules/src/core/ics24_host/path.rs +++ b/modules/src/core/ics24_host/path.rs @@ -3,11 +3,12 @@ use crate::prelude::*; /// Path-space as listed in ICS-024 /// https://github.com/cosmos/ibc/tree/master/spec/core/ics-024-host-requirements#path-space /// Some of these are implemented in other ICSs, but ICS-024 has a nice summary table. 
-/// use core::str::FromStr; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; +use crate::core::{ + ics04_channel::packet::Sequence, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, +}; use derive_more::{Display, From}; use flex_error::define_error; @@ -22,7 +23,7 @@ pub const SDK_UPGRADE_QUERY_PATH: &str = "store/upgrade/key"; /// ABCI client upgrade keys /// - The key identifying the upgraded IBC state within the upgrade sub-store const UPGRADED_IBC_STATE: &str = "upgradedIBCState"; -///- The key identifying the upgraded client state +/// - The key identifying the upgraded client state const UPGRADED_CLIENT_STATE: &str = "upgradedClient"; /// - The key identifying the upgraded consensus state const UPGRADED_CLIENT_CONSENSUS_STATE: &str = "upgradedConsState"; @@ -30,20 +31,20 @@ const UPGRADED_CLIENT_CONSENSUS_STATE: &str = "upgradedConsState"; /// The Path enum abstracts out the different sub-paths. 
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, From, Display)] pub enum Path { - ClientType(ClientTypePath), - ClientState(ClientStatePath), - ClientConsensusState(ClientConsensusStatePath), - ClientConnections(ClientConnectionsPath), - Connections(ConnectionsPath), - Ports(PortsPath), - ChannelEnds(ChannelEndsPath), - SeqSends(SeqSendsPath), - SeqRecvs(SeqRecvsPath), - SeqAcks(SeqAcksPath), - Commitments(CommitmentsPath), - Acks(AcksPath), - Receipts(ReceiptsPath), - Upgrade(ClientUpgradePath), + ClientType(ClientTypePath), + ClientState(ClientStatePath), + ClientConsensusState(ClientConsensusStatePath), + ClientConnections(ClientConnectionsPath), + Connections(ConnectionsPath), + Ports(PortsPath), + ChannelEnds(ChannelEndsPath), + SeqSends(SeqSendsPath), + SeqRecvs(SeqRecvsPath), + SeqAcks(SeqAcksPath), + Commitments(CommitmentsPath), + Acks(AcksPath), + Receipts(ReceiptsPath), + Upgrade(ClientUpgradePath), } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] @@ -55,16 +56,11 @@ pub struct ClientTypePath(pub ClientId); pub struct ClientStatePath(pub ClientId); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "clients/{}/consensusStates/{}-{}", - "client_id", - "epoch", - "height" -)] +#[display(fmt = "clients/{}/consensusStates/{}-{}", "client_id", "epoch", "height")] pub struct ClientConsensusStatePath { - pub client_id: ClientId, - pub epoch: u64, - pub height: u64, + pub client_id: ClientId, + pub epoch: u64, + pub height: u64, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] @@ -97,921 +93,800 @@ pub struct SeqAcksPath(pub PortId, pub ChannelId); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] #[display( - fmt = "commitments/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" + fmt = "commitments/ports/{}/channels/{}/sequences/{}", + "port_id", + "channel_id", + "sequence" )] pub struct CommitmentsPath { - pub 
port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "acks/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" -)] +#[display(fmt = "acks/ports/{}/channels/{}/sequences/{}", "port_id", "channel_id", "sequence")] pub struct AcksPath { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] -#[display( - fmt = "receipts/ports/{}/channels/{}/sequences/{}", - "port_id", - "channel_id", - "sequence" -)] +#[display(fmt = "receipts/ports/{}/channels/{}/sequences/{}", "port_id", "channel_id", "sequence")] pub struct ReceiptsPath { - pub port_id: PortId, - pub channel_id: ChannelId, - pub sequence: Sequence, + pub port_id: PortId, + pub channel_id: ChannelId, + pub sequence: Sequence, } /// Paths that are specific for client upgrades. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Display)] pub enum ClientUpgradePath { - #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_STATE)] - UpgradedClientState(u64), - #[display( - fmt = "{}/{}/{}", - UPGRADED_IBC_STATE, - _0, - UPGRADED_CLIENT_CONSENSUS_STATE - )] - UpgradedClientConsensusState(u64), + #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_STATE)] + UpgradedClientState(u64), + #[display(fmt = "{}/{}/{}", UPGRADED_IBC_STATE, _0, UPGRADED_CLIENT_CONSENSUS_STATE)] + UpgradedClientConsensusState(u64), } /// Sub-paths which are not part of the specification, but are still /// useful to represent for parsing purposes. 
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] enum SubPath { - Channels(ChannelId), - Sequences(Sequence), + Channels(ChannelId), + Sequences(Sequence), } impl Path { - /// Indication if the path is provable. - pub fn is_provable(&self) -> bool { - !matches!(&self, Path::ClientConnections(_) | Path::Ports(_)) - } - - /// into_bytes implementation - pub fn into_bytes(self) -> Vec { - self.to_string().into_bytes() - } + /// Indication if the path is provable. + pub fn is_provable(&self) -> bool { + !matches!(&self, Path::ClientConnections(_) | Path::Ports(_)) + } + + /// into_bytes implementation + pub fn into_bytes(self) -> Vec { + self.to_string().into_bytes() + } } define_error! { - #[derive(Eq, PartialEq)] - PathError { - ParseFailure - { path: String } - | e | { format!("'{}' could not be parsed into a Path", e.path) }, - } + #[derive(Eq, PartialEq)] + PathError { + ParseFailure + { path: String } + | e | { format!("'{}' could not be parsed into a Path", e.path) }, + } } /// The FromStr trait allows paths encoded as strings to be parsed into Paths. 
impl FromStr for Path { - type Err = PathError; - - fn from_str(s: &str) -> Result { - let components: Vec<&str> = s.split('/').collect(); - - parse_client_paths(&components) - .or_else(|| parse_connections(&components)) - .or_else(|| parse_ports(&components)) - .or_else(|| parse_channel_ends(&components)) - .or_else(|| parse_seqs(&components)) - .or_else(|| parse_commitments(&components)) - .or_else(|| parse_acks(&components)) - .or_else(|| parse_receipts(&components)) - .or_else(|| parse_upgrades(&components)) - .ok_or_else(|| PathError::parse_failure(s.to_string())) - } + type Err = PathError; + + fn from_str(s: &str) -> Result { + let components: Vec<&str> = s.split('/').collect(); + + parse_client_paths(&components) + .or_else(|| parse_connections(&components)) + .or_else(|| parse_ports(&components)) + .or_else(|| parse_channel_ends(&components)) + .or_else(|| parse_seqs(&components)) + .or_else(|| parse_commitments(&components)) + .or_else(|| parse_acks(&components)) + .or_else(|| parse_receipts(&components)) + .or_else(|| parse_upgrades(&components)) + .ok_or_else(|| PathError::parse_failure(s.to_string())) + } } fn parse_client_paths(components: &[&str]) -> Option { - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "clients" { - return None; - } - - let client_id = match ClientId::from_str(components[1]) { - Ok(s) => s, - Err(_) => return None, - }; - - if components.len() == 3 { - match components[2] { - "clientType" => Some(ClientTypePath(client_id).into()), - "clientState" => Some(ClientStatePath(client_id).into()), - "connections" => Some(ClientConnectionsPath(client_id).into()), - _ => None, - } - } else if components.len() == 4 { - if "consensusStates" != components[2] { - return None; - } - - let epoch_height = match components.last() { - Some(eh) => *eh, - None => return None, - }; - - let epoch_height: Vec<&str> = epoch_height.split('-').collect(); - - if epoch_height.len() != 2 { - return None; - 
} - - let epoch = epoch_height[0]; - let height = epoch_height[1]; - - let epoch = match epoch.parse::() { - Ok(ep) => ep, - Err(_) => return None, - }; - - let height = match height.parse::() { - Ok(h) => h, - Err(_) => return None, - }; - - Some( - ClientConsensusStatePath { - client_id, - epoch, - height, - } - .into(), - ) - } else { - None - } + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "clients" { + return None + } + + let client_id = match ClientId::from_str(components[1]) { + Ok(s) => s, + Err(_) => return None, + }; + + if components.len() == 3 { + match components[2] { + "clientType" => Some(ClientTypePath(client_id).into()), + "clientState" => Some(ClientStatePath(client_id).into()), + "connections" => Some(ClientConnectionsPath(client_id).into()), + _ => None, + } + } else if components.len() == 4 { + if "consensusStates" != components[2] { + return None + } + + let epoch_height = match components.last() { + Some(eh) => *eh, + None => return None, + }; + + let epoch_height: Vec<&str> = epoch_height.split('-').collect(); + + if epoch_height.len() != 2 { + return None + } + + let epoch = epoch_height[0]; + let height = epoch_height[1]; + + let epoch = match epoch.parse::() { + Ok(ep) => ep, + Err(_) => return None, + }; + + let height = match height.parse::() { + Ok(h) => h, + Err(_) => return None, + }; + + Some(ClientConsensusStatePath { client_id, epoch, height }.into()) + } else { + None + } } fn parse_connections(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "connections" { - return None; - } - - let connection_id = match components.last() { - Some(c) => *c, - None => return None, - }; - - let connection_id = match ConnectionId::from_str(connection_id) { - Ok(c) => c, - Err(_) => return None, - }; - - Some(ConnectionsPath(connection_id).into()) + if 
components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "connections" { + return None + } + + let connection_id = match components.last() { + Some(c) => *c, + None => return None, + }; + + let connection_id = match ConnectionId::from_str(connection_id) { + Ok(c) => c, + Err(_) => return None, + }; + + Some(ConnectionsPath(connection_id).into()) } fn parse_ports(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "ports" { - return None; - } - - let port_id = match components.last() { - Some(p) => *p, - None => return None, - }; - - let port_id = match PortId::from_str(port_id) { - Ok(p) => p, - Err(_) => return None, - }; - - Some(PortsPath(port_id).into()) + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "ports" { + return None + } + + let port_id = match components.last() { + Some(p) => *p, + None => return None, + }; + + let port_id = match PortId::from_str(port_id) { + Ok(p) => p, + Err(_) => return None, + }; + + Some(PortsPath(port_id).into()) } fn parse_channels(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "channels" { - return None; - } - - let channel_id = match components.last() { - Some(c) => *c, - None => return None, - }; - - let channel_id = match ChannelId::from_str(channel_id) { - Ok(c) => c, - Err(_) => return None, - }; - - Some(SubPath::Channels(channel_id)) + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "channels" { + return None + } + + let channel_id = match components.last() { + 
Some(c) => *c, + None => return None, + }; + + let channel_id = match ChannelId::from_str(channel_id) { + Ok(c) => c, + Err(_) => return None, + }; + + Some(SubPath::Channels(channel_id)) } fn parse_sequences(components: &[&str]) -> Option { - if components.len() != 2 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "sequences" { - return None; - } - - let sequence_number = match components.last() { - Some(s) => *s, - None => return None, - }; - - match Sequence::from_str(sequence_number) { - Ok(seq) => Some(SubPath::Sequences(seq)), - Err(_) => None, - } + if components.len() != 2 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "sequences" { + return None + } + + let sequence_number = match components.last() { + Some(s) => *s, + None => return None, + }; + + match Sequence::from_str(sequence_number) { + Ok(seq) => Some(SubPath::Sequences(seq)), + Err(_) => None, + } } fn parse_channel_ends(components: &[&str]) -> Option { - if components.len() != 5 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "channelEnds" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - Some(ChannelEndsPath(port_id, channel_id).into()) + if components.len() != 5 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "channelEnds" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + + let port_id = + if let 
Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + Some(ChannelEndsPath(port_id, channel_id).into()) } fn parse_seqs(components: &[&str]) -> Option { - if components.len() != 5 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - match first { - "nextSequenceSend" => Some(SeqSendsPath(port_id, channel_id).into()), - "nextSequenceRecv" => Some(SeqRecvsPath(port_id, channel_id).into()), - "nextSequenceAck" => Some(SeqAcksPath(port_id, channel_id).into()), - _ => None, - } + if components.len() != 5 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + match first { + "nextSequenceSend" => Some(SeqSendsPath(port_id, channel_id).into()), + "nextSequenceRecv" => Some(SeqRecvsPath(port_id, channel_id).into()), + "nextSequenceAck" => Some(SeqAcksPath(port_id, channel_id).into()), + _ => None, + } } fn parse_commitments(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "commitments" { - return None; - } - - let port = 
parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - }; - - Some( - CommitmentsPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "commitments" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + Some(CommitmentsPath { port_id, channel_id, sequence }.into()) } fn parse_acks(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "acks" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - 
}; - - Some( - AcksPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "acks" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + Some(AcksPath { port_id, channel_id, sequence }.into()) } fn parse_receipts(components: &[&str]) -> Option { - if components.len() != 7 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != "receipts" { - return None; - } - - let port = parse_ports(&components[1..=2]); - let channel = parse_channels(&components[3..=4]); - let sequence = parse_sequences(&components[5..]); - - let port_id = if let Some(Path::Ports(PortsPath(port_id))) = port { - port_id - } else { - return None; - }; - - let channel_id = if let Some(SubPath::Channels(channel_id)) = channel { - channel_id - } else { - return None; - }; - - let sequence = if let Some(SubPath::Sequences(seq)) = sequence { - seq - } else { - return None; - }; - - Some( - ReceiptsPath { - port_id, - channel_id, - sequence, - } - .into(), - ) + if components.len() != 7 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != "receipts" { + return None + } + + let port = parse_ports(&components[1..=2]); + let channel = parse_channels(&components[3..=4]); + let sequence = parse_sequences(&components[5..]); + + let port_id = + if let Some(Path::Ports(PortsPath(port_id))) = 
port { port_id } else { return None }; + + let channel_id = + if let Some(SubPath::Channels(channel_id)) = channel { channel_id } else { return None }; + + let sequence = if let Some(SubPath::Sequences(seq)) = sequence { seq } else { return None }; + + Some(ReceiptsPath { port_id, channel_id, sequence }.into()) } fn parse_upgrades(components: &[&str]) -> Option { - if components.len() != 3 { - return None; - } - - let first = match components.first() { - Some(f) => *f, - None => return None, - }; - - if first != UPGRADED_IBC_STATE { - return None; - } - - let last = match components.last() { - Some(l) => *l, - None => return None, - }; - - let height = match components[1].parse::() { - Ok(h) => h, - Err(_) => return None, - }; - - match last { - UPGRADED_CLIENT_STATE => Some(ClientUpgradePath::UpgradedClientState(height).into()), - UPGRADED_CLIENT_CONSENSUS_STATE => { - Some(ClientUpgradePath::UpgradedClientConsensusState(height).into()) - } - _ => None, - } + if components.len() != 3 { + return None + } + + let first = match components.first() { + Some(f) => *f, + None => return None, + }; + + if first != UPGRADED_IBC_STATE { + return None + } + + let last = match components.last() { + Some(l) => *l, + None => return None, + }; + + let height = match components[1].parse::() { + Ok(h) => h, + Err(_) => return None, + }; + + match last { + UPGRADED_CLIENT_STATE => Some(ClientUpgradePath::UpgradedClientState(height).into()), + UPGRADED_CLIENT_CONSENSUS_STATE => + Some(ClientUpgradePath::UpgradedClientConsensusState(height).into()), + _ => None, + } } #[cfg(test)] mod tests { - use super::*; - use core::str::FromStr; - - #[test] - fn invalid_path_doesnt_parse() { - let invalid_path = Path::from_str("clients/clientType"); - - assert!(invalid_path.is_err()); - } - - #[test] - fn test_parse_client_paths_fn() { - let path = "clients/07-tendermint-0/clientType"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - 
Some(Path::ClientType(ClientTypePath(ClientId::default()))) - ); - - let path = "clients/07-tendermint-0/clientState"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - Some(Path::ClientState(ClientStatePath(ClientId::default()))) - ); - - let path = "clients/07-tendermint-0/consensusStates/15-31"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_client_paths(&components), - Some(Path::ClientConsensusState(ClientConsensusStatePath { - client_id: ClientId::default(), - epoch: 15, - height: 31, - })) - ); - } - - #[test] - fn client_type_path_parses() { - let path = "clients/07-tendermint-0/clientType"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientType(ClientTypePath(ClientId::default())) - ); - } - - #[test] - fn client_state_path_parses() { - let path = "clients/07-tendermint-0/clientState"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientState(ClientStatePath(ClientId::default())) - ); - } - - #[test] - fn client_consensus_state_path_parses() { - let path = "clients/07-tendermint-0/consensusStates/15-31"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientConsensusState(ClientConsensusStatePath { - client_id: ClientId::default(), - epoch: 15, - height: 31, - }) - ); - } - - #[test] - fn client_connections_path_parses() { - let path = "clients/07-tendermint-0/connections"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::ClientConnections(ClientConnectionsPath(ClientId::default())) - ); - } - - #[test] - fn test_parse_connections_fn() { - let path = "connections/connection-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_connections(&components), - Some(Path::Connections(ConnectionsPath(ConnectionId::new(0)))), - 
); - } - - #[test] - fn connections_path_parses() { - let path = "connections/connection-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Connections(ConnectionsPath(ConnectionId::new(0))) - ); - } - - #[test] - fn test_parse_ports_fn() { - let path = "ports/defaultPort"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_ports(&components), - Some(Path::Ports(PortsPath(PortId::default()))), - ); - } - - #[test] - fn ports_path_parses() { - let path = "ports/defaultPort"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!(path.unwrap(), Path::Ports(PortsPath(PortId::default()))); - } - - #[test] - fn test_parse_channels_fn() { - let path = "channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_channels(&components), - Some(SubPath::Channels(ChannelId::default())), - ); - } - - #[test] - fn channels_path_doesnt_parse() { - let path = "channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_err()); - } - - #[test] - fn test_parse_sequences_fn() { - let path = "sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_sequences(&components), - Some(SubPath::Sequences(Sequence::default())) - ); - } - - #[test] - fn sequences_path_doesnt_parse() { - let path = "sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_err()); - } - - #[test] - fn test_parse_channel_ends_fn() { - let path = "channelEnds/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_channel_ends(&components), - Some(Path::ChannelEnds(ChannelEndsPath( - PortId::default(), - ChannelId::default() - ))), - ); - } - - #[test] - fn channel_ends_path_parses() { - let path = "channelEnds/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - 
Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn test_parse_seqs_fn() { - let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqSends(SeqSendsPath( - PortId::default(), - ChannelId::default() - ))), - ); - - let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqRecvs(SeqRecvsPath( - PortId::default(), - ChannelId::default() - ))), - ); - - let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_seqs(&components), - Some(Path::SeqAcks(SeqAcksPath( - PortId::default(), - ChannelId::default() - ))), - ); - } - - #[test] - fn sequence_send_path_parses() { - let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn sequence_recv_path_parses() { - let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn sequence_ack_path_parses() { - let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default())), - ); - } - - #[test] - fn test_parse_commitments_fn() { - let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - 
parse_commitments(&components), - Some(Path::Commitments(CommitmentsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn commitments_path_parses() { - let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Commitments(CommitmentsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn test_parse_acks_fn() { - let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_acks(&components), - Some(Path::Acks(AcksPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn acks_path_parses() { - let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Acks(AcksPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn test_parse_receipts_fn() { - let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_receipts(&components), - Some(Path::Receipts(ReceiptsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - })), - ); - } - - #[test] - fn receipts_path_parses() { - let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Receipts(ReceiptsPath { - port_id: PortId::default(), - channel_id: ChannelId::default(), - sequence: Sequence::default(), - }), - ); - } - - #[test] - fn 
test_parse_upgrades_fn() { - let path = "upgradedIBCState/0/upgradedClient"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_upgrades(&components), - Some(Path::Upgrade(ClientUpgradePath::UpgradedClientState(0))), - ); - - let path = "upgradedIBCState/0/upgradedConsState"; - let components: Vec<&str> = path.split('/').collect(); - - assert_eq!( - parse_upgrades(&components), - Some(Path::Upgrade( - ClientUpgradePath::UpgradedClientConsensusState(0) - )), - ) - } - - #[test] - fn upgrade_client_state_path_parses() { - let path = "upgradedIBCState/0/upgradedClient"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Upgrade(ClientUpgradePath::UpgradedClientState(0)), - ); - } - - #[test] - fn upgrade_client_consensus_state_path_parses() { - let path = "upgradedIBCState/0/upgradedConsState"; - let path = Path::from_str(path); - - assert!(path.is_ok()); - assert_eq!( - path.unwrap(), - Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0)), - ); - } + use super::*; + use core::str::FromStr; + + #[test] + fn invalid_path_doesnt_parse() { + let invalid_path = Path::from_str("clients/clientType"); + + assert!(invalid_path.is_err()); + } + + #[test] + fn test_parse_client_paths_fn() { + let path = "clients/07-tendermint-0/clientType"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientType(ClientTypePath(ClientId::default()))) + ); + + let path = "clients/07-tendermint-0/clientState"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientState(ClientStatePath(ClientId::default()))) + ); + + let path = "clients/07-tendermint-0/consensusStates/15-31"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_client_paths(&components), + Some(Path::ClientConsensusState(ClientConsensusStatePath { + client_id: 
ClientId::default(), + epoch: 15, + height: 31, + })) + ); + } + + #[test] + fn client_type_path_parses() { + let path = "clients/07-tendermint-0/clientType"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::ClientType(ClientTypePath(ClientId::default()))); + } + + #[test] + fn client_state_path_parses() { + let path = "clients/07-tendermint-0/clientState"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::ClientState(ClientStatePath(ClientId::default()))); + } + + #[test] + fn client_consensus_state_path_parses() { + let path = "clients/07-tendermint-0/consensusStates/15-31"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ClientConsensusState(ClientConsensusStatePath { + client_id: ClientId::default(), + epoch: 15, + height: 31, + }) + ); + } + + #[test] + fn client_connections_path_parses() { + let path = "clients/07-tendermint-0/connections"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ClientConnections(ClientConnectionsPath(ClientId::default())) + ); + } + + #[test] + fn test_parse_connections_fn() { + let path = "connections/connection-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_connections(&components), + Some(Path::Connections(ConnectionsPath(ConnectionId::new(0)))), + ); + } + + #[test] + fn connections_path_parses() { + let path = "connections/connection-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Connections(ConnectionsPath(ConnectionId::new(0)))); + } + + #[test] + fn test_parse_ports_fn() { + let path = "ports/defaultPort"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_ports(&components), Some(Path::Ports(PortsPath(PortId::default()))),); + } + + #[test] + fn ports_path_parses() { + let path = "ports/defaultPort"; + let path = 
Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Ports(PortsPath(PortId::default()))); + } + + #[test] + fn test_parse_channels_fn() { + let path = "channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_channels(&components), Some(SubPath::Channels(ChannelId::default())),); + } + + #[test] + fn channels_path_doesnt_parse() { + let path = "channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_err()); + } + + #[test] + fn test_parse_sequences_fn() { + let path = "sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!(parse_sequences(&components), Some(SubPath::Sequences(Sequence::default()))); + } + + #[test] + fn sequences_path_doesnt_parse() { + let path = "sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_err()); + } + + #[test] + fn test_parse_channel_ends_fn() { + let path = "channelEnds/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_channel_ends(&components), + Some(Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default()))), + ); + } + + #[test] + fn channel_ends_path_parses() { + let path = "channelEnds/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::ChannelEnds(ChannelEndsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn test_parse_seqs_fn() { + let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + Some(Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default()))), + ); + + let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + 
Some(Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default()))), + ); + + let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_seqs(&components), + Some(Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default()))), + ); + } + + #[test] + fn sequence_send_path_parses() { + let path = "nextSequenceSend/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqSends(SeqSendsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn sequence_recv_path_parses() { + let path = "nextSequenceRecv/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqRecvs(SeqRecvsPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn sequence_ack_path_parses() { + let path = "nextSequenceAck/ports/defaultPort/channels/channel-0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::SeqAcks(SeqAcksPath(PortId::default(), ChannelId::default())), + ); + } + + #[test] + fn test_parse_commitments_fn() { + let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_commitments(&components), + Some(Path::Commitments(CommitmentsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn commitments_path_parses() { + let path = "commitments/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Commitments(CommitmentsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn 
test_parse_acks_fn() { + let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_acks(&components), + Some(Path::Acks(AcksPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn acks_path_parses() { + let path = "acks/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Acks(AcksPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn test_parse_receipts_fn() { + let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_receipts(&components), + Some(Path::Receipts(ReceiptsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + })), + ); + } + + #[test] + fn receipts_path_parses() { + let path = "receipts/ports/defaultPort/channels/channel-0/sequences/0"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Receipts(ReceiptsPath { + port_id: PortId::default(), + channel_id: ChannelId::default(), + sequence: Sequence::default(), + }), + ); + } + + #[test] + fn test_parse_upgrades_fn() { + let path = "upgradedIBCState/0/upgradedClient"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_upgrades(&components), + Some(Path::Upgrade(ClientUpgradePath::UpgradedClientState(0))), + ); + + let path = "upgradedIBCState/0/upgradedConsState"; + let components: Vec<&str> = path.split('/').collect(); + + assert_eq!( + parse_upgrades(&components), + Some(Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0))), + ) + } + + #[test] + fn upgrade_client_state_path_parses() { + let path = 
"upgradedIBCState/0/upgradedClient"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!(path.unwrap(), Path::Upgrade(ClientUpgradePath::UpgradedClientState(0)),); + } + + #[test] + fn upgrade_client_consensus_state_path_parses() { + let path = "upgradedIBCState/0/upgradedConsState"; + let path = Path::from_str(path); + + assert!(path.is_ok()); + assert_eq!( + path.unwrap(), + Path::Upgrade(ClientUpgradePath::UpgradedClientConsensusState(0)), + ); + } } diff --git a/modules/src/core/ics24_host/validate.rs b/modules/src/core/ics24_host/validate.rs index 722d35b124..8069f24705 100644 --- a/modules/src/core/ics24_host/validate.rs +++ b/modules/src/core/ics24_host/validate.rs @@ -11,36 +11,33 @@ const VALID_SPECIAL_CHARS: &str = "._+-#[]<>"; /// A valid identifier only contain lowercase alphabetic characters, and be of a given min and max /// length. pub fn validate_identifier(id: &str, min: usize, max: usize) -> Result<(), Error> { - assert!(max >= min); - - // Check identifier is not empty - if id.is_empty() { - return Err(Error::empty()); - } - - // Check identifier does not contain path separators - if id.contains(PATH_SEPARATOR) { - return Err(Error::contain_separator(id.to_string())); - } - - // Check identifier length is between given min/max - if id.len() < min || id.len() > max { - return Err(Error::invalid_length(id.to_string(), id.len(), min, max)); - } - - // Check that the identifier comprises only valid characters: - // - Alphanumeric - // - `.`, `_`, `+`, `-`, `#` - // - `[`, `]`, `<`, `>` - if !id - .chars() - .all(|c| c.is_alphanumeric() || VALID_SPECIAL_CHARS.contains(c)) - { - return Err(Error::invalid_character(id.to_string())); - } - - // All good! 
- Ok(()) + assert!(max >= min); + + // Check identifier is not empty + if id.is_empty() { + return Err(Error::empty()) + } + + // Check identifier does not contain path separators + if id.contains(PATH_SEPARATOR) { + return Err(Error::contain_separator(id.to_string())) + } + + // Check identifier length is between given min/max + if id.len() < min || id.len() > max { + return Err(Error::invalid_length(id.to_string(), id.len(), min, max)) + } + + // Check that the identifier comprises only valid characters: + // - Alphanumeric + // - `.`, `_`, `+`, `-`, `#` + // - `[`, `]`, `<`, `>` + if !id.chars().all(|c| c.is_alphanumeric() || VALID_SPECIAL_CHARS.contains(c)) { + return Err(Error::invalid_character(id.to_string())) + } + + // All good! + Ok(()) } /// Default validator function for Client identifiers. @@ -48,7 +45,7 @@ pub fn validate_identifier(id: &str, min: usize, max: usize) -> Result<(), Error /// A valid identifier must be between 9-64 characters and only contain lowercase /// alphabetic characters, pub fn validate_client_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 9, 64) + validate_identifier(id, 9, 64) } /// Default validator function for Connection identifiers. @@ -56,7 +53,7 @@ pub fn validate_client_identifier(id: &str) -> Result<(), Error> { /// A valid Identifier must be between 10-64 characters and only contain lowercase /// alphabetic characters, pub fn validate_connection_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 10, 64) + validate_identifier(id, 10, 64) } /// Default validator function for Port identifiers. 
@@ -64,83 +61,83 @@ pub fn validate_connection_identifier(id: &str) -> Result<(), Error> { /// A valid Identifier must be between 2-128 characters and only contain lowercase /// alphabetic characters, pub fn validate_port_identifier(id: &str) -> Result<(), Error> { - validate_identifier(id, 2, 128) + validate_identifier(id, 2, 128) } #[cfg(test)] mod tests { - use crate::core::ics24_host::validate::{ - validate_client_identifier, validate_connection_identifier, validate_identifier, - validate_port_identifier, - }; - use test_log::test; - - #[test] - fn parse_invalid_port_id_min() { - // invalid min port id - let id = validate_port_identifier("p"); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_port_id_max() { - // invalid max port id (test string length is 130 chars) - let id = validate_port_identifier( + use crate::core::ics24_host::validate::{ + validate_client_identifier, validate_connection_identifier, validate_identifier, + validate_port_identifier, + }; + use test_log::test; + + #[test] + fn parse_invalid_port_id_min() { + // invalid min port id + let id = validate_port_identifier("p"); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_port_id_max() { + // invalid max port id (test string length is 130 chars) + let id = validate_port_identifier( "9anxkcme6je544d5lnj46zqiiiygfqzf8w4bjecbnyj4lj6s7zlpst67yln64tixp9anxkcme6je544d5lnj46zqiiiygfqzf8w4bjecbnyj4lj6s7zlpst67yln64tixp", ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_connection_id_min() { - // invalid min connection id - let id = validate_connection_identifier("connect01"); - assert!(id.is_err()) - } - - #[test] - fn parse_connection_id_max() { - // invalid max connection id (test string length is 65) - let id = validate_connection_identifier( - "ihhankr30iy4nna65hjl2wjod7182io1t2s7u3ip3wqtbbn1sl0rgcntqc540r36r", - ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_client_id_min() { - // invalid min client id - let id = validate_client_identifier("client"); - 
assert!(id.is_err()) - } - - #[test] - fn parse_client_id_max() { - // invalid max client id (test string length is 65) - let id = validate_client_identifier( - "f0isrs5enif9e4td3r2jcbxoevhz6u1fthn4aforq7ams52jn5m48eiesfht9ckpn", - ); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_chars() { - // invalid id chars - let id = validate_identifier("channel@01", 1, 10); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_empty() { - // invalid id empty - let id = validate_identifier("", 1, 10); - assert!(id.is_err()) - } - - #[test] - fn parse_invalid_id_path_separator() { - // invalid id with path separator - let id = validate_identifier("id/1", 1, 10); - assert!(id.is_err()) - } + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_connection_id_min() { + // invalid min connection id + let id = validate_connection_identifier("connect01"); + assert!(id.is_err()) + } + + #[test] + fn parse_connection_id_max() { + // invalid max connection id (test string length is 65) + let id = validate_connection_identifier( + "ihhankr30iy4nna65hjl2wjod7182io1t2s7u3ip3wqtbbn1sl0rgcntqc540r36r", + ); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_client_id_min() { + // invalid min client id + let id = validate_client_identifier("client"); + assert!(id.is_err()) + } + + #[test] + fn parse_client_id_max() { + // invalid max client id (test string length is 65) + let id = validate_client_identifier( + "f0isrs5enif9e4td3r2jcbxoevhz6u1fthn4aforq7ams52jn5m48eiesfht9ckpn", + ); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_id_chars() { + // invalid id chars + let id = validate_identifier("channel@01", 1, 10); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_id_empty() { + // invalid id empty + let id = validate_identifier("", 1, 10); + assert!(id.is_err()) + } + + #[test] + fn parse_invalid_id_path_separator() { + // invalid id with path separator + let id = validate_identifier("id/1", 1, 10); + assert!(id.is_err()) + } } diff --git 
a/modules/src/core/ics26_routing/context.rs b/modules/src/core/ics26_routing/context.rs index c22cc0a14c..9a43a311a9 100644 --- a/modules/src/core/ics26_routing/context.rs +++ b/modules/src/core/ics26_routing/context.rs @@ -1,43 +1,40 @@ use crate::prelude::*; +use crate::{ + core::{ + ics02_client::context::{ClientKeeper, ClientReader}, + ics03_connection::context::{ConnectionKeeper, ConnectionReader}, + ics04_channel::{ + channel::{Counterparty, Order}, + context::{ChannelKeeper, ChannelReader}, + error::Error, + msgs::acknowledgement::Acknowledgement as GenericAcknowledgement, + packet::Packet, + Version, + }, + ics05_port::context::PortReader, + ics24_host::identifier::{ChannelId, ConnectionId, PortId}, + }, + events::ModuleEvent, + handler::HandlerOutputBuilder, + signer::Signer, +}; use alloc::borrow::{Borrow, Cow}; -use core::any::Any; -use core::fmt::Debug; -use core::{fmt, str::FromStr}; - +use core::{any::Any, fmt, fmt::Debug, str::FromStr}; use serde::{Deserialize, Serialize}; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; -use crate::core::ics03_connection::context::{ConnectionKeeper, ConnectionReader}; -use crate::core::ics04_channel::channel::{Counterparty, Order}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::msgs::acknowledgement::Acknowledgement as GenericAcknowledgement; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; -use crate::events::ModuleEvent; -use crate::handler::HandlerOutputBuilder; -use crate::signer::Signer; +/// This trait captures all the functional dependencies of needed in light client implementations +pub trait ReaderContext: ClientKeeper + ClientReader + ConnectionReader + ChannelReader {} /// This trait captures all the functional 
dependencies (i.e., context) which the ICS26 module /// requires to be able to dispatch and process IBC messages. In other words, this is the /// representation of a chain from the perspective of the IBC module of that chain. -pub trait Ics26Context: - ClientReader - + ClientKeeper - + ConnectionReader - + ConnectionKeeper - + ChannelKeeper - + ChannelReader - + PortReader -{ - type Router: Router; - - fn router(&self) -> &Self::Router; - - fn router_mut(&mut self) -> &mut Self::Router; +pub trait Ics26Context: ConnectionKeeper + ChannelKeeper + PortReader + ReaderContext { + type Router: Router; + + fn router(&self) -> &Self::Router; + + fn router_mut(&mut self) -> &mut Self::Router; } #[derive(Debug, PartialEq)] @@ -47,33 +44,33 @@ pub struct InvalidModuleId; pub struct ModuleId(String); impl ModuleId { - pub fn new(s: Cow<'_, str>) -> Result { - if !s.trim().is_empty() && s.chars().all(char::is_alphanumeric) { - Ok(Self(s.into_owned())) - } else { - Err(InvalidModuleId) - } - } + pub fn new(s: Cow<'_, str>) -> Result { + if !s.trim().is_empty() && s.chars().all(char::is_alphanumeric) { + Ok(Self(s.into_owned())) + } else { + Err(InvalidModuleId) + } + } } impl fmt::Display for ModuleId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } } impl FromStr for ModuleId { - type Err = InvalidModuleId; + type Err = InvalidModuleId; - fn from_str(s: &str) -> Result { - Self::new(Cow::Borrowed(s)) - } + fn from_str(s: &str) -> Result { + Self::new(Cow::Borrowed(s)) + } } impl Borrow for ModuleId { - fn borrow(&self) -> &str { - self.0.as_str() - } + fn borrow(&self) -> &str { + self.0.as_str() + } } /// Types implementing this trait are expected to implement `From` @@ -82,143 +79,143 @@ pub trait Acknowledgement: AsRef<[u8]> {} pub type WriteFn = dyn FnOnce(&mut dyn Any) -> Result<(), String>; pub enum OnRecvPacketAck { - Nil(Box), - 
Successful(Box, Box), - Failed(Box), + Nil(Box), + Successful(Box, Box), + Failed(Box), } impl OnRecvPacketAck { - pub fn is_successful(&self) -> bool { - matches!(self, OnRecvPacketAck::Successful(_, _)) - } + pub fn is_successful(&self) -> bool { + matches!(self, OnRecvPacketAck::Successful(_, _)) + } } pub type ModuleOutputBuilder = HandlerOutputBuilder<(), ModuleEvent>; -pub trait Module: Debug + Send + Sync + AsAnyMut + 'static { - #[allow(clippy::too_many_arguments)] - fn on_chan_open_init( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - ) -> Result<(), Error> { - Ok(()) - } - - #[allow(clippy::too_many_arguments)] - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - _counterparty_version: &Version, - ) -> Result; - - fn on_chan_open_ack( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty_version: &Version, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_open_confirm( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_close_init( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_chan_close_confirm( - &mut self, - _output: &mut ModuleOutputBuilder, - _port_id: &PortId, - _channel_id: &ChannelId, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_recv_packet( - &self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> OnRecvPacketAck { - OnRecvPacketAck::Nil(Box::new(|_| Ok(()))) - } - - fn on_acknowledgement_packet( - &mut self, - 
_output: &mut ModuleOutputBuilder, - _packet: &Packet, - _acknowledgement: &GenericAcknowledgement, - _relayer: &Signer, - ) -> Result<(), Error> { - Ok(()) - } - - fn on_timeout_packet( - &mut self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> Result<(), Error> { - Ok(()) - } +pub trait Module: Send + Sync + AsAnyMut { + #[allow(clippy::too_many_arguments)] + fn on_chan_open_init( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + ) -> Result<(), Error> { + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + _counterparty_version: &Version, + ) -> Result; + + fn on_chan_open_ack( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty_version: &Version, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_open_confirm( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_close_init( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_chan_close_confirm( + &mut self, + _output: &mut ModuleOutputBuilder, + _port_id: &PortId, + _channel_id: &ChannelId, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_recv_packet( + &self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> OnRecvPacketAck { + OnRecvPacketAck::Nil(Box::new(|_| Ok(()))) + } + + fn on_acknowledgement_packet( + &mut self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _acknowledgement: 
&GenericAcknowledgement, + _relayer: &Signer, + ) -> Result<(), Error> { + Ok(()) + } + + fn on_timeout_packet( + &mut self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> Result<(), Error> { + Ok(()) + } } pub trait RouterBuilder: Sized { - /// The `Router` type that the builder must build - type Router: Router; + /// The `Router` type that the builder must build + type Router: Router; - /// Registers `Module` against the specified `ModuleId` in the `Router`'s internal map - /// - /// Returns an error if a `Module` has already been registered against the specified `ModuleId` - fn add_route(self, module_id: ModuleId, module: impl Module) -> Result; + /// Registers `Module` against the specified `ModuleId` in the `Router`'s internal map + /// + /// Returns an error if a `Module` has already been registered against the specified `ModuleId` + fn add_route(self, module_id: ModuleId, module: impl Module) -> Result; - /// Consumes the `RouterBuilder` and returns a `Router` as configured - fn build(self) -> Self::Router; + /// Consumes the `RouterBuilder` and returns a `Router` as configured + fn build(self) -> Self::Router; } pub trait AsAnyMut: Any { - fn as_any_mut(&mut self) -> &mut dyn Any; + fn as_any_mut(&mut self) -> &mut dyn Any; } impl AsAnyMut for M { - fn as_any_mut(&mut self) -> &mut dyn Any { - self - } + fn as_any_mut(&mut self) -> &mut dyn Any { + self + } } /// A router maintains a mapping of `ModuleId`s against `Modules`. Implementations must not publicly /// expose APIs to add new routes once constructed. Routes may only be added at the time of /// instantiation using the `RouterBuilder`. 
pub trait Router { - /// Returns a mutable reference to a `Module` registered against the specified `ModuleId` - fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module>; + /// Returns a mutable reference to a `Module` registered against the specified `ModuleId` + fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module>; - /// Returns true if the `Router` has a `Module` registered against the specified `ModuleId` - fn has_route(&self, module_id: &impl Borrow) -> bool; + /// Returns true if the `Router` has a `Module` registered against the specified `ModuleId` + fn has_route(&self, module_id: &impl Borrow) -> bool; } diff --git a/modules/src/core/ics26_routing/error.rs b/modules/src/core/ics26_routing/error.rs index 260a688cdd..e2aa0e2f88 100644 --- a/modules/src/core/ics26_routing/error.rs +++ b/modules/src/core/ics26_routing/error.rs @@ -1,36 +1,36 @@ use crate::prelude::*; use flex_error::{define_error, TraceError}; -use crate::applications::transfer; -use crate::core::ics02_client; -use crate::core::ics03_connection; -use crate::core::ics04_channel; +use crate::{ + applications::transfer, + core::{ics02_client, ics03_connection, ics04_channel}, +}; define_error! 
{ - #[derive(Debug, PartialEq, Eq)] - Error { - Ics02Client - [ ics02_client::error::Error ] - | _ | { "ICS02 client error" }, + #[derive(Debug, PartialEq, Eq)] + Error { + Ics02Client + [ ics02_client::error::Error ] + | _ | { "ICS02 client error" }, - Ics03Connection - [ ics03_connection::error::Error ] - | _ | { "ICS03 connection error" }, + Ics03Connection + [ ics03_connection::error::Error ] + | _ | { "ICS03 connection error" }, - Ics04Channel - [ ics04_channel::error::Error ] - | _ | { "ICS04 channel error" }, + Ics04Channel + [ ics04_channel::error::Error ] + | _ | { "ICS04 channel error" }, - Ics20FungibleTokenTransfer - [ transfer::error::Error ] - | _ | { "ICS20 fungible token transfer error" }, + Ics20FungibleTokenTransfer + [ transfer::error::Error ] + | _ | { "ICS20 fungible token transfer error" }, - UnknownMessageTypeUrl - { url: String } - | e | { format_args!("unknown type URL {0}", e.url) }, + UnknownMessageTypeUrl + { url: String } + | e | { format_args!("unknown type URL {0}", e.url) }, - MalformedMessageBytes - [ TraceError ] - | _ | { "the message is malformed and cannot be decoded" }, - } + MalformedMessageBytes + [ TraceError ] + | _ | { "the message is malformed and cannot be decoded" }, + } } diff --git a/modules/src/core/ics26_routing/handler.rs b/modules/src/core/ics26_routing/handler.rs index b5d5362ec5..c28fd8e86c 100644 --- a/modules/src/core/ics26_routing/handler.rs +++ b/modules/src/core/ics26_routing/handler.rs @@ -1,30 +1,35 @@ -use crate::prelude::*; - -use ibc_proto::google::protobuf::Any; - -use crate::core::ics02_client::handler::dispatch as ics2_msg_dispatcher; -use crate::core::ics03_connection::handler::dispatch as ics3_msg_dispatcher; -use crate::core::ics04_channel::handler::{ - channel_callback as ics4_callback, channel_dispatch as ics4_msg_dispatcher, - channel_validate as ics4_validate, recv_packet::RecvPacketResult, +use crate::core::ics02_client::context::ClientTypes; +use crate::{ + core::{ + 
ics02_client::{context::ClientKeeper, handler::dispatch as ics2_msg_dispatcher}, + ics03_connection::handler::dispatch as ics3_msg_dispatcher, + ics04_channel::{ + handler::{ + channel_callback as ics4_callback, channel_dispatch as ics4_msg_dispatcher, + channel_validate as ics4_validate, get_module_for_packet_msg, + packet_callback as ics4_packet_callback, + packet_dispatch as ics4_packet_msg_dispatcher, recv_packet::RecvPacketResult, + }, + packet::PacketResult, + }, + ics26_routing::{ + context::{Ics26Context, ModuleOutputBuilder, ReaderContext}, + error::Error, + msgs::Ics26Envelope::{self, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg}, + }, + }, + events::IbcEvent, + handler::HandlerOutput, + prelude::*, }; -use crate::core::ics04_channel::handler::{ - get_module_for_packet_msg, packet_callback as ics4_packet_callback, - packet_dispatch as ics4_packet_msg_dispatcher, -}; -use crate::core::ics04_channel::packet::PacketResult; -use crate::core::ics26_routing::context::{Ics26Context, ModuleOutputBuilder}; -use crate::core::ics26_routing::error::Error; -use crate::core::ics26_routing::msgs::Ics26Envelope::{ - self, Ics2Msg, Ics3Msg, Ics4ChannelMsg, Ics4PacketMsg, -}; -use crate::{events::IbcEvent, handler::HandlerOutput}; +use core::fmt::Debug; +use ibc_proto::google::protobuf::Any; /// Result of message execution - comprises of events emitted and logs entries created during the /// execution of a transaction message. pub struct MsgReceipt { - pub events: Vec, - pub log: Vec, + pub events: Vec, + pub log: Vec, } /// Mimics the DeliverTx ABCI interface, but for a single message and at a slightly lower level. @@ -32,20 +37,27 @@ pub struct MsgReceipt { /// Returns a vector of all events that got generated as a byproduct of processing `message`. 
pub fn deliver(ctx: &mut Ctx, message: Any) -> Result where - Ctx: Ics26Context, + Ctx: Ics26Context + ReaderContext, + Ics26Envelope: TryFrom, + Error: From< as TryFrom>::Error>, { - // Decode the proto message into a domain message, creating an ICS26 envelope. - let envelope = decode(message)?; + // Decode the proto message into a domain message, creating an ICS26 envelope. + let envelope = decode::(message)?; - // Process the envelope, and accumulate any events that were generated. - let HandlerOutput { log, events, .. } = dispatch(ctx, envelope)?; + // Process the envelope, and accumulate any events that were generated. + let HandlerOutput { log, events, .. } = dispatch::<_>(ctx, envelope)?; - Ok(MsgReceipt { events, log }) + Ok(MsgReceipt { events, log }) } /// Attempts to convert a message into a [Ics26Envelope] message -pub fn decode(message: Any) -> Result { - message.try_into() +pub fn decode(message: Any) -> Result, Error> +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + Ics26Envelope: TryFrom, + Error: From< as TryFrom>::Error>, +{ + message.try_into().map_err(Into::into) } /// Top-level ICS dispatch function. Routes incoming IBC messages to their corresponding module. @@ -53,511 +65,513 @@ pub fn decode(message: Any) -> Result { /// and events produced after processing the input `msg`. /// If this method returns an error, the runtime is expected to rollback all state modifications to /// the `Ctx` caused by all messages from the transaction that this `msg` is a part of. -pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> +pub fn dispatch(ctx: &mut Ctx, msg: Ics26Envelope) -> Result, Error> where - Ctx: Ics26Context, + Ctx: Ics26Context + ClientKeeper, { - let output = match msg { - Ics2Msg(msg) => { - let handler_output = ics2_msg_dispatcher(ctx, msg).map_err(Error::ics02_client)?; - - // Apply the result to the context (host chain store). 
- ctx.store_client_result(handler_output.result) - .map_err(Error::ics02_client)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - - Ics3Msg(msg) => { - let handler_output = ics3_msg_dispatcher(ctx, msg).map_err(Error::ics03_connection)?; - - // Apply any results to the host chain store. - ctx.store_connection_result(handler_output.result) - .map_err(Error::ics03_connection)?; - - HandlerOutput::builder() - .with_log(handler_output.log) - .with_events(handler_output.events) - .with_result(()) - } - - Ics4ChannelMsg(msg) => { - let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; - let (mut handler_builder, channel_result) = - ics4_msg_dispatcher(ctx, &msg).map_err(Error::ics04_channel)?; - - let mut module_output = ModuleOutputBuilder::new(); - let cb_result = - ics4_callback(ctx, &module_id, &msg, channel_result, &mut module_output); - handler_builder.merge(module_output); - let channel_result = cb_result.map_err(Error::ics04_channel)?; - - // Apply any results to the host chain store. - ctx.store_channel_result(channel_result) - .map_err(Error::ics04_channel)?; - - handler_builder.with_result(()) - } - - Ics4PacketMsg(msg) => { - let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; - let (mut handler_builder, packet_result) = - ics4_packet_msg_dispatcher(ctx, &msg).map_err(Error::ics04_channel)?; - - if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { - return Ok(handler_builder.with_result(())); - } - - let mut module_output = ModuleOutputBuilder::new(); - let cb_result = ics4_packet_callback(ctx, &module_id, &msg, &mut module_output); - handler_builder.merge(module_output); - cb_result.map_err(Error::ics04_channel)?; - - // Apply any results to the host chain store. 
- ctx.store_packet_result(packet_result) - .map_err(Error::ics04_channel)?; - - handler_builder.with_result(()) - } - }; - - Ok(output) + let output = match msg { + Ics2Msg(msg) => { + let handler_output = + ics2_msg_dispatcher::(ctx, msg).map_err(Error::ics02_client)?; + + // Apply the result to the context (host chain store). + ctx.store_client_result(handler_output.result).map_err(Error::ics02_client)?; + + HandlerOutput::builder() + .with_log(handler_output.log) + .with_events(handler_output.events) + .with_result(()) + }, + + Ics3Msg(msg) => { + let handler_output = + ics3_msg_dispatcher::<_>(ctx, msg).map_err(Error::ics03_connection)?; + + // Apply any results to the host chain store. + ctx.store_connection_result(handler_output.result) + .map_err(Error::ics03_connection)?; + + HandlerOutput::builder() + .with_log(handler_output.log) + .with_events(handler_output.events) + .with_result(()) + }, + + Ics4ChannelMsg(msg) => { + let module_id = ics4_validate(ctx, &msg).map_err(Error::ics04_channel)?; + let (mut handler_builder, channel_result) = + ics4_msg_dispatcher::<_>(ctx, &msg).map_err(Error::ics04_channel)?; + + let mut module_output = ModuleOutputBuilder::new(); + let cb_result = + ics4_callback(ctx, &module_id, &msg, channel_result, &mut module_output); + handler_builder.merge(module_output); + let channel_result = cb_result.map_err(Error::ics04_channel)?; + + // Apply any results to the host chain store. 
+ ctx.store_channel_result(channel_result).map_err(Error::ics04_channel)?; + + handler_builder.with_result(()) + }, + + Ics4PacketMsg(msg) => { + let module_id = get_module_for_packet_msg(ctx, &msg).map_err(Error::ics04_channel)?; + let (mut handler_builder, packet_result) = + ics4_packet_msg_dispatcher::<_>(ctx, &msg).map_err(Error::ics04_channel)?; + + if matches!(packet_result, PacketResult::Recv(RecvPacketResult::NoOp)) { + return Ok(handler_builder.with_result(())); + } + + let mut module_output = ModuleOutputBuilder::new(); + let cb_result = ics4_packet_callback(ctx, &module_id, &msg, &mut module_output); + handler_builder.merge(module_output); + cb_result.map_err(Error::ics04_channel)?; + + // Apply any results to the host chain store. + ctx.store_packet_result(packet_result).map_err(Error::ics04_channel)?; + + handler_builder.with_result(()) + }, + }; + + Ok(output) } #[cfg(test)] mod tests { - use crate::prelude::*; - - use test_log::test; - - use crate::applications::transfer::context::test::deliver as ics20_deliver; - use crate::applications::transfer::PrefixedCoin; - use crate::core::ics02_client::client_consensus::AnyConsensusState; - use crate::core::ics02_client::client_state::AnyClientState; - use crate::core::ics02_client::msgs::{ - create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, - upgrade_client::MsgUpgradeAnyClient, ClientMsg, - }; - use crate::core::ics03_connection::msgs::{ - conn_open_ack::{test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck}, - conn_open_init::{test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit}, - conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, - ConnectionMsg, - }; - use crate::core::ics04_channel::msgs::{ - chan_close_confirm::{ - test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, - }, - chan_close_init::{test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit}, - 
chan_open_ack::{test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck}, - chan_open_init::{test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit}, - chan_open_try::{test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry}, - recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, - timeout_on_close::{test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose}, - ChannelMsg, PacketMsg, - }; - use crate::core::ics23_commitment::commitment::test_util::get_dummy_merkle_proof; - use crate::events::IbcEvent; - use crate::{ - applications::transfer::msgs::transfer::test_util::get_dummy_msg_transfer, - applications::transfer::msgs::transfer::MsgTransfer, - applications::transfer::packet::PacketData, applications::transfer::MODULE_ID_STR, - }; - - use crate::core::ics24_host::identifier::ConnectionId; - use crate::core::ics26_routing::context::{Ics26Context, ModuleId, Router, RouterBuilder}; - use crate::core::ics26_routing::error::Error; - use crate::core::ics26_routing::handler::dispatch; - use crate::core::ics26_routing::msgs::Ics26Envelope; - use crate::handler::HandlerOutputBuilder; - use crate::mock::client_state::{MockClientState, MockConsensusState}; - use crate::mock::context::{MockContext, MockRouterBuilder}; - use crate::mock::header::MockHeader; - use crate::test_utils::{get_dummy_account_id, DummyTransferModule}; - use crate::timestamp::Timestamp; - use crate::Height; - - #[test] - /// These tests exercise two main paths: (1) the ability of the ICS26 routing module to dispatch - /// messages to the correct module handler, and more importantly: (2) the ability of ICS handlers - /// to work with the context and correctly store results (i.e., the `ClientKeeper`, - /// `ConnectionKeeper`, and `ChannelKeeper` traits). 
- fn routing_module_and_keepers() { - #[derive(Clone, Debug)] - enum TestMsg { - Ics26(Ics26Envelope), - Ics20(MsgTransfer), - } - - impl From for TestMsg { - fn from(msg: Ics26Envelope) -> Self { - Self::Ics26(msg) - } - } - - impl From> for TestMsg { - fn from(msg: MsgTransfer) -> Self { - Self::Ics20(msg) - } - } - - // Test parameters - struct Test { - name: String, - msg: TestMsg, - want_pass: bool, - } - let default_signer = get_dummy_account_id(); - let client_height = 5; - let start_client_height = Height::new(0, client_height); - let update_client_height = Height::new(0, 34); - let update_client_height_after_send = Height::new(0, 35); - - let update_client_height_after_second_send = Height::new(0, 36); - - let upgrade_client_height = Height::new(1, 2); - - let upgrade_client_height_second = Height::new(1, 1); - - let transfer_module_id: ModuleId = MODULE_ID_STR.parse().unwrap(); - - // We reuse this same context across all tests. Nothing in particular needs parametrizing. - let mut ctx = { - let ctx = MockContext::default(); - let module = DummyTransferModule::new(ctx.ibc_store_share()); - let router = MockRouterBuilder::default() - .add_route(transfer_module_id.clone(), module) - .unwrap() - .build(); - ctx.with_router(router) - }; - - let create_client_msg = MsgCreateAnyClient::new( - AnyClientState::from(MockClientState::new(MockHeader::new(start_client_height))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - start_client_height, - ))), - default_signer.clone(), - ) - .unwrap(); - - // - // Connection handshake messages. - // - let msg_conn_init = - MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); - - let correct_msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( - client_height, - client_height, - )) - .unwrap(); - - // The handler will fail to process this msg because the client height is too advanced. 
- let incorrect_msg_conn_try = MsgConnectionOpenTry::try_from( - get_dummy_raw_msg_conn_open_try(client_height + 1, client_height + 1), - ) - .unwrap(); - - let msg_conn_ack = MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack( - client_height, - client_height, - )) - .unwrap(); - - // - // Channel handshake messages. - // - let msg_chan_init = - MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); - - // The handler will fail to process this b/c the associated connection does not exist - let mut incorrect_msg_chan_init = msg_chan_init.clone(); - incorrect_msg_chan_init.channel.connection_hops = vec![ConnectionId::new(590)]; - - let msg_chan_try = - MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(client_height)).unwrap(); - - let msg_chan_ack = - MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(client_height)).unwrap(); - - let msg_chan_close_init = - MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); - - let msg_chan_close_confirm = - MsgChannelCloseConfirm::try_from(get_dummy_raw_msg_chan_close_confirm(client_height)) - .unwrap(); - - let msg_transfer = get_dummy_msg_transfer(35); - let msg_transfer_two = get_dummy_msg_transfer(36); - - let mut msg_to_on_close = - MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close(36, 5)).unwrap(); - msg_to_on_close.packet.sequence = 2.into(); - msg_to_on_close.packet.timeout_height = msg_transfer_two.timeout_height; - msg_to_on_close.packet.timeout_timestamp = msg_transfer_two.timeout_timestamp; - - let denom = msg_transfer_two.token.denom.clone(); - let packet_data = { - let data = PacketData { - token: PrefixedCoin { - denom, - amount: msg_transfer_two.token.amount, - }, - sender: msg_transfer_two.sender.clone(), - receiver: msg_transfer_two.receiver.clone(), - }; - serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") - }; - msg_to_on_close.packet.data = packet_data; - - let msg_recv_packet = 
MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); - - // First, create a client.. - let res = dispatch( - &mut ctx, - Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), - ); - - assert!( + use crate::prelude::*; + + use test_log::test; + + use crate::{ + applications::transfer::{ + context::test::deliver as ics20_deliver, + msgs::transfer::{test_util::get_dummy_msg_transfer, MsgTransfer}, + packet::PacketData, + PrefixedCoin, MODULE_ID_STR, + }, + core::{ + ics02_client::msgs::{ + create_client::MsgCreateAnyClient, update_client::MsgUpdateAnyClient, + upgrade_client::MsgUpgradeAnyClient, ClientMsg, + }, + ics03_connection::msgs::{ + conn_open_ack::{test_util::get_dummy_raw_msg_conn_open_ack, MsgConnectionOpenAck}, + conn_open_init::{ + test_util::get_dummy_raw_msg_conn_open_init, MsgConnectionOpenInit, + }, + conn_open_try::{test_util::get_dummy_raw_msg_conn_open_try, MsgConnectionOpenTry}, + ConnectionMsg, + }, + ics04_channel::msgs::{ + chan_close_confirm::{ + test_util::get_dummy_raw_msg_chan_close_confirm, MsgChannelCloseConfirm, + }, + chan_close_init::{ + test_util::get_dummy_raw_msg_chan_close_init, MsgChannelCloseInit, + }, + chan_open_ack::{test_util::get_dummy_raw_msg_chan_open_ack, MsgChannelOpenAck}, + chan_open_init::{test_util::get_dummy_raw_msg_chan_open_init, MsgChannelOpenInit}, + chan_open_try::{test_util::get_dummy_raw_msg_chan_open_try, MsgChannelOpenTry}, + recv_packet::{test_util::get_dummy_raw_msg_recv_packet, MsgRecvPacket}, + timeout_on_close::{ + test_util::get_dummy_raw_msg_timeout_on_close, MsgTimeoutOnClose, + }, + ChannelMsg, PacketMsg, + }, + }, + events::IbcEvent, + mock::client_state::{AnyClientState, AnyConsensusState}, + }; + + use crate::mock::header::MockClientMessage; + use crate::{ + core::{ + ics24_host::identifier::ConnectionId, + ics26_routing::{ + context::{Ics26Context, ModuleId, Router, RouterBuilder}, + error::Error, + handler::dispatch, + msgs::Ics26Envelope, + }, + }, + 
handler::HandlerOutputBuilder, + mock::{ + client_state::{MockClientState, MockConsensusState}, + context::{MockClientTypes, MockContext, MockRouterBuilder}, + header::MockHeader, + }, + test_utils::{get_dummy_account_id, DummyTransferModule}, + timestamp::Timestamp, + Height, + }; + + #[test] + /// These tests exercise two main paths: (1) the ability of the ICS26 routing module to dispatch + /// messages to the correct module handler, and more importantly: (2) the ability of ICS + /// handlers to work with the context and correctly store results (i.e., the `ClientKeeper`, + /// `ConnectionKeeper`, and `ChannelKeeper` traits). + fn routing_module_and_keepers() { + #[derive(Clone, Debug)] + enum TestMsg { + Ics26(Ics26Envelope>), + Ics20(MsgTransfer), + } + + impl From>> for TestMsg { + fn from(msg: Ics26Envelope>) -> Self { + Self::Ics26(msg) + } + } + + impl From> for TestMsg { + fn from(msg: MsgTransfer) -> Self { + Self::Ics20(msg) + } + } + + // Test parameters + struct Test { + name: String, + msg: TestMsg, + want_pass: bool, + } + let default_signer = get_dummy_account_id(); + let client_height = 5; + let start_client_height = Height::new(0, client_height); + let update_client_height = Height::new(0, 34); + let update_client_height_after_send = Height::new(0, 35); + + let update_client_height_after_second_send = Height::new(0, 36); + + let upgrade_client_height = Height::new(1, 2); + + let upgrade_client_height_second = Height::new(1, 1); + + let transfer_module_id: ModuleId = MODULE_ID_STR.parse().unwrap(); + + // We reuse this same context across all tests. Nothing in particular needs parametrizing. 
+ let mut ctx = { + let ctx = MockContext::default(); + let module = DummyTransferModule::new(ctx.ibc_store_share()); + let router = MockRouterBuilder::default() + .add_route(transfer_module_id.clone(), module) + .unwrap() + .build(); + ctx.with_router(router) + }; + + let create_client_msg = MsgCreateAnyClient::new( + AnyClientState::from(MockClientState::new(MockClientMessage::Header(MockHeader::new( + start_client_height, + )))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new(start_client_height))), + default_signer.clone(), + ) + .unwrap(); + + // + // Connection handshake messages. + // + let msg_conn_init = + MsgConnectionOpenInit::try_from(get_dummy_raw_msg_conn_open_init()).unwrap(); + + let correct_msg_conn_try = MsgConnectionOpenTry::try_from(get_dummy_raw_msg_conn_open_try( + client_height, + client_height, + )) + .unwrap(); + + // The handler will fail to process this msg because the client height is too advanced. + let incorrect_msg_conn_try = MsgConnectionOpenTry::try_from( + get_dummy_raw_msg_conn_open_try(client_height + 1, client_height + 1), + ) + .unwrap(); + + let msg_conn_ack = MsgConnectionOpenAck::try_from(get_dummy_raw_msg_conn_open_ack( + client_height, + client_height, + )) + .unwrap(); + + // + // Channel handshake messages. 
+ // + let msg_chan_init = + MsgChannelOpenInit::try_from(get_dummy_raw_msg_chan_open_init()).unwrap(); + + // The handler will fail to process this b/c the associated connection does not exist + let mut incorrect_msg_chan_init = msg_chan_init.clone(); + incorrect_msg_chan_init.channel.connection_hops = vec![ConnectionId::new(590)]; + + let msg_chan_try = + MsgChannelOpenTry::try_from(get_dummy_raw_msg_chan_open_try(client_height)).unwrap(); + + let msg_chan_ack = + MsgChannelOpenAck::try_from(get_dummy_raw_msg_chan_open_ack(client_height)).unwrap(); + + let msg_chan_close_init = + MsgChannelCloseInit::try_from(get_dummy_raw_msg_chan_close_init()).unwrap(); + + let msg_chan_close_confirm = + MsgChannelCloseConfirm::try_from(get_dummy_raw_msg_chan_close_confirm(client_height)) + .unwrap(); + + let msg_transfer = get_dummy_msg_transfer(35); + let msg_transfer_two = get_dummy_msg_transfer(36); + + let mut msg_to_on_close = + MsgTimeoutOnClose::try_from(get_dummy_raw_msg_timeout_on_close(36, 5)).unwrap(); + msg_to_on_close.packet.sequence = 2.into(); + msg_to_on_close.packet.timeout_height = msg_transfer_two.timeout_height; + msg_to_on_close.packet.timeout_timestamp = msg_transfer_two.timeout_timestamp; + + let denom = msg_transfer_two.token.denom.clone(); + let packet_data = { + let data = PacketData { + token: PrefixedCoin { denom, amount: msg_transfer_two.token.amount }, + sender: msg_transfer_two.sender.clone(), + receiver: msg_transfer_two.receiver.clone(), + }; + serde_json::to_vec(&data).expect("PacketData's infallible Serialize impl failed") + }; + msg_to_on_close.packet.data = packet_data; + + let msg_recv_packet = MsgRecvPacket::try_from(get_dummy_raw_msg_recv_packet(35)).unwrap(); + + // First, create a client.. 
+ let res = dispatch( + &mut ctx, + Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(create_client_msg.clone())), + ); + + assert!( res.is_ok(), "ICS26 routing dispatch test 'client creation' failed for message {:?} with result: {:?}", create_client_msg, res ); - ctx.scope_port_to_module(msg_chan_init.port_id.clone(), transfer_module_id.clone()); - - // Figure out the ID of the client that was just created. - let mut events = res.unwrap().events; - let client_id_event = events.pop(); - assert!( - client_id_event.is_some(), - "There was no event generated for client creation!" - ); - let client_id = match client_id_event.unwrap() { - IbcEvent::CreateClient(create_client) => create_client.client_id().clone(), - event => panic!("unexpected IBC event: {:?}", event), - }; - - let tests: Vec = vec![ - // Test some ICS2 client functionality. - Test { - name: "Client update successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height) - .with_timestamp(Timestamp::now()) - .into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - Test { - name: "Client update fails due to stale header".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height).into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: false, - }, - Test { - name: "Connection open init succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( - msg_conn_init.with_client_id(client_id.clone()), - )) - .into(), - want_pass: true, - }, - Test { - name: "Connection open try fails due to InvalidConsensusHeight (too high)" - .to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( - incorrect_msg_conn_try, - ))) - .into(), - want_pass: false, - }, - Test { - name: "Connection open try 
succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( - correct_msg_conn_try.with_client_id(client_id.clone()), - ))) - .into(), - want_pass: true, - }, - Test { - name: "Connection open ack succeeds".to_string(), - msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new( - msg_conn_ack, - ))) - .into(), - want_pass: true, - }, - // ICS04 - Test { - name: "Channel open init succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)) - .into(), - want_pass: true, - }, - Test { - name: "Channel open init fail due to missing connection".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( - incorrect_msg_chan_init, - )) - .into(), - want_pass: false, - }, - Test { - name: "Channel open try succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)).into(), - want_pass: true, - }, - Test { - name: "Channel open ack succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)).into(), - want_pass: true, - }, - Test { - name: "Packet send".to_string(), - msg: msg_transfer.into(), - want_pass: true, - }, - // The client update is required in this test, because the proof associated with - // msg_recv_packet has the same height as the packet TO height (see get_dummy_raw_msg_recv_packet) - Test { - name: "Client update successful #2".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height_after_send) - .with_timestamp(Timestamp::now()) - .into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - Test { - name: "Receive packet".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())) - .into(), - want_pass: true, - }, - Test { - name: "Re-Receive packet".to_string(), - msg: 
Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)).into(), - want_pass: true, - }, - Test { - name: "Packet send".to_string(), - msg: msg_transfer_two.into(), - want_pass: true, - }, - Test { - name: "Client update successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: MockHeader::new(update_client_height_after_second_send).into(), - signer: default_signer.clone(), - })) - .into(), - want_pass: true, - }, - //ICS04-close channel - Test { - name: "Channel close init succeeds".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( - msg_chan_close_init, - )) - .into(), - want_pass: true, - }, - Test { - name: "Channel close confirm fails cause channel is already closed".to_string(), - msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( - msg_chan_close_confirm, - )) - .into(), - want_pass: false, - }, - //ICS04-to_on_close - Test { - name: "Timeout on close".to_string(), - msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)).into(), - want_pass: true, - }, - Test { - name: "Client upgrade successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( - client_id.clone(), - AnyClientState::Mock(MockClientState::new(MockHeader::new( - upgrade_client_height, - ))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - upgrade_client_height, - ))), - get_dummy_merkle_proof(), - get_dummy_merkle_proof(), - default_signer.clone(), - ))) - .into(), - want_pass: true, - }, - Test { - name: "Client upgrade un-successful".to_string(), - msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( - client_id, - AnyClientState::Mock(MockClientState::new(MockHeader::new( - upgrade_client_height_second, - ))), - AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( - upgrade_client_height_second, - ))), - 
get_dummy_merkle_proof(), - get_dummy_merkle_proof(), - default_signer, - ))) - .into(), - want_pass: false, - }, - ] - .into_iter() - .collect(); - - for test in tests { - let res = match test.msg.clone() { - TestMsg::Ics26(msg) => dispatch(&mut ctx, msg).map(|_| ()), - TestMsg::Ics20(msg) => { - let transfer_module = - ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); - ics20_deliver( - transfer_module - .as_any_mut() - .downcast_mut::() - .unwrap(), - &mut HandlerOutputBuilder::new(), - msg, - ) - .map(|_| ()) - .map_err(Error::ics04_channel) - } - }; - - assert_eq!( - test.want_pass, - res.is_ok(), - "ICS26 routing dispatch test '{}' failed for message {:?}\nwith result: {:?}", - test.name, - test.msg, - res - ); - } - } + ctx.scope_port_to_module(msg_chan_init.port_id.clone(), transfer_module_id.clone()); + + // Figure out the ID of the client that was just created. + let mut events = res.unwrap().events; + let client_id_event = events.pop(); + assert!(client_id_event.is_some(), "There was no event generated for client creation!"); + let client_id = match client_id_event.unwrap() { + IbcEvent::CreateClient(create_client) => create_client.client_id().clone(), + event => panic!("unexpected IBC event: {:?}", event), + }; + + let tests: Vec = vec![ + // Test some ICS2 client functionality. 
+ Test { + name: "Client update successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + client_message: MockHeader::new(update_client_height) + .with_timestamp(Timestamp::now()) + .into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + Test { + name: "Client update fails due to stale header".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + client_message: MockHeader::new(update_client_height).into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: false, + }, + Test { + name: "Connection open init succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( + msg_conn_init.with_client_id(client_id.clone()), + )) + .into(), + want_pass: true, + }, + Test { + name: "Connection open try fails due to InvalidConsensusHeight (too high)" + .to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( + incorrect_msg_conn_try, + ))) + .into(), + want_pass: false, + }, + Test { + name: "Connection open try succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new( + correct_msg_conn_try.with_client_id(client_id.clone()), + ))) + .into(), + want_pass: true, + }, + Test { + name: "Connection open ack succeeds".to_string(), + msg: Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new( + msg_conn_ack, + ))) + .into(), + want_pass: true, + }, + // ICS04 + Test { + name: "Channel open init succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(msg_chan_init)) + .into(), + want_pass: true, + }, + Test { + name: "Channel open init fail due to missing connection".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( + incorrect_msg_chan_init, + )) + .into(), + want_pass: false, + }, + Test { + name: "Channel open try 
succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(msg_chan_try)).into(), + want_pass: true, + }, + Test { + name: "Channel open ack succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(msg_chan_ack)).into(), + want_pass: true, + }, + Test { name: "Packet send".to_string(), msg: msg_transfer.into(), want_pass: true }, + // The client update is required in this test, because the proof associated with + // msg_recv_packet has the same height as the packet TO height (see + // get_dummy_raw_msg_recv_packet) + Test { + name: "Client update successful #2".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + client_message: MockHeader::new(update_client_height_after_send) + .with_timestamp(Timestamp::now()) + .into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + Test { + name: "Receive packet".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet.clone())) + .into(), + want_pass: true, + }, + Test { + name: "Re-Receive packet".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(msg_recv_packet)).into(), + want_pass: true, + }, + Test { name: "Packet send".to_string(), msg: msg_transfer_two.into(), want_pass: true }, + Test { + name: "Client update successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(MsgUpdateAnyClient { + client_id: client_id.clone(), + client_message: MockHeader::new(update_client_height_after_second_send).into(), + signer: default_signer.clone(), + })) + .into(), + want_pass: true, + }, + //ICS04-close channel + Test { + name: "Channel close init succeeds".to_string(), + msg: Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( + msg_chan_close_init, + )) + .into(), + want_pass: true, + }, + Test { + name: "Channel close confirm fails cause channel is already closed".to_string(), + msg: 
Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm( + msg_chan_close_confirm, + )) + .into(), + want_pass: false, + }, + //ICS04-to_on_close + Test { + name: "Timeout on close".to_string(), + msg: Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(msg_to_on_close)).into(), + want_pass: true, + }, + Test { + name: "Client upgrade successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( + client_id.clone(), + AnyClientState::Mock(MockClientState::new(MockClientMessage::Header( + MockHeader::new(upgrade_client_height), + ))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( + upgrade_client_height, + ))), + Vec::new(), + Vec::new(), + default_signer.clone(), + ))) + .into(), + want_pass: true, + }, + Test { + name: "Client upgrade un-successful".to_string(), + msg: Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(MsgUpgradeAnyClient::new( + client_id, + AnyClientState::Mock(MockClientState::new(MockClientMessage::Header( + MockHeader::new(upgrade_client_height_second), + ))), + AnyConsensusState::Mock(MockConsensusState::new(MockHeader::new( + upgrade_client_height_second, + ))), + Vec::new(), + Vec::new(), + default_signer, + ))) + .into(), + want_pass: false, + }, + ] + .into_iter() + .collect(); + + for test in tests { + let res = match test.msg.clone() { + TestMsg::Ics26(msg) => dispatch(&mut ctx, msg).map(|_| ()), + TestMsg::Ics20(msg) => { + let transfer_module = + ctx.router_mut().get_route_mut(&transfer_module_id).unwrap(); + ics20_deliver( + transfer_module + .as_any_mut() + .downcast_mut::>() + .unwrap(), + &mut HandlerOutputBuilder::new(), + msg, + ) + .map(|_| ()) + .map_err(Error::ics04_channel) + }, + }; + + assert_eq!( + test.want_pass, + res.is_ok(), + "ICS26 routing dispatch test '{}' failed for message {:?}\nwith result: {:?}", + test.name, + test.msg, + res + ); + } + } } diff --git a/modules/src/core/ics26_routing/msgs.rs b/modules/src/core/ics26_routing/msgs.rs index 
3f2306e6e9..846ce570fa 100644 --- a/modules/src/core/ics26_routing/msgs.rs +++ b/modules/src/core/ics26_routing/msgs.rs @@ -1,156 +1,169 @@ use crate::prelude::*; +use core::fmt::{Debug, Display}; use ibc_proto::google::protobuf::Any; -use crate::core::ics02_client::msgs::{create_client, update_client, upgrade_client, ClientMsg}; -use crate::core::ics03_connection::msgs::{ - conn_open_ack, conn_open_confirm, conn_open_init, conn_open_try, ConnectionMsg, +use crate::core::{ + ics02_client::msgs::{ + create_client, create_client::MsgCreateAnyClient, update_client, + update_client::MsgUpdateAnyClient, upgrade_client, upgrade_client::MsgUpgradeAnyClient, + ClientMsg, + }, + ics03_connection::msgs::{ + conn_open_ack, conn_open_ack::MsgConnectionOpenAck, conn_open_confirm, conn_open_init, + conn_open_try, conn_open_try::MsgConnectionOpenTry, ConnectionMsg, + }, + ics04_channel::msgs::{ + acknowledgement, chan_close_confirm, chan_close_init, chan_open_ack, chan_open_confirm, + chan_open_init, chan_open_try, recv_packet, timeout, timeout_on_close, ChannelMsg, + PacketMsg, + }, + ics26_routing::error::Error, }; -use crate::core::ics04_channel::msgs::{ - acknowledgement, chan_close_confirm, chan_close_init, chan_open_ack, chan_open_confirm, - chan_open_init, chan_open_try, recv_packet, timeout, timeout_on_close, ChannelMsg, PacketMsg, + +use crate::core::ics02_client::context::ClientTypes; +use ibc_proto::ibc::core::{ + client::v1::{MsgCreateClient, MsgUpdateClient, MsgUpgradeClient}, + connection, }; -use crate::core::ics26_routing::error::Error; use tendermint_proto::Protobuf; /// Enumeration of all messages that the local ICS26 module is capable of routing. 
#[derive(Clone, Debug)] -pub enum Ics26Envelope { - Ics2Msg(ClientMsg), - Ics3Msg(ConnectionMsg), - Ics4ChannelMsg(ChannelMsg), - Ics4PacketMsg(PacketMsg), +pub enum Ics26Envelope +where + C: ClientTypes + Eq + Clone + Debug, +{ + Ics2Msg(ClientMsg), + Ics3Msg(ConnectionMsg), + Ics4ChannelMsg(ChannelMsg), + Ics4PacketMsg(PacketMsg), } -impl TryFrom for Ics26Envelope { - type Error = Error; +impl TryFrom for Ics26Envelope +where + C: ClientTypes + Clone + Debug + PartialEq + Eq, + Any: From, + Any: From, + Any: From, + MsgCreateAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgCreateAnyClient: Protobuf, + MsgUpdateAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgUpdateAnyClient: Protobuf, + MsgUpgradeAnyClient: TryFrom, + as TryFrom>::Error: Display, + MsgUpgradeAnyClient: Protobuf, + MsgConnectionOpenTry: TryFrom, + as TryFrom>::Error: Display, + MsgConnectionOpenTry: Protobuf, + connection::v1::MsgConnectionOpenAck: From>, + MsgConnectionOpenAck: TryFrom, + as TryFrom>::Error: Display, + MsgConnectionOpenAck: Protobuf, +{ + type Error = Error; - fn try_from(any_msg: Any) -> Result { - match any_msg.type_url.as_str() { - // ICS2 messages - create_client::TYPE_URL => { - // Pop out the message and then wrap it in the corresponding type. 
- let domain_msg = create_client::MsgCreateAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(domain_msg))) - } - update_client::TYPE_URL => { - let domain_msg = update_client::MsgUpdateAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(domain_msg))) - } - upgrade_client::TYPE_URL => { - let domain_msg = upgrade_client::MsgUpgradeAnyClient::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(domain_msg))) - } + fn try_from(any_msg: Any) -> Result { + match any_msg.type_url.as_str() { + // ICS2 messages + create_client::TYPE_URL => { + // Pop out the message and then wrap it in the corresponding type. + let domain_msg = MsgCreateAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::CreateClient(domain_msg))) + }, + update_client::TYPE_URL => { + let domain_msg = MsgUpdateAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpdateClient(domain_msg))) + }, + upgrade_client::TYPE_URL => { + let domain_msg = + upgrade_client::MsgUpgradeAnyClient::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics2Msg(ClientMsg::UpgradeClient(domain_msg))) + }, - // ICS03 - conn_open_init::TYPE_URL => { - let domain_msg = conn_open_init::MsgConnectionOpenInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit( - domain_msg, - ))) - } - conn_open_try::TYPE_URL => { - let domain_msg = conn_open_try::MsgConnectionOpenTry::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry( - Box::new(domain_msg), - ))) - } 
- conn_open_ack::TYPE_URL => { - let domain_msg = conn_open_ack::MsgConnectionOpenAck::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck( - Box::new(domain_msg), - ))) - } - conn_open_confirm::TYPE_URL => { - let domain_msg = - conn_open_confirm::MsgConnectionOpenConfirm::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics3Msg( - ConnectionMsg::ConnectionOpenConfirm(domain_msg), - )) - } + // ICS03 + conn_open_init::TYPE_URL => { + let domain_msg = conn_open_init::MsgConnectionOpenInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenInit(domain_msg))) + }, + conn_open_try::TYPE_URL => { + let domain_msg = MsgConnectionOpenTry::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenTry(Box::new(domain_msg)))) + }, + conn_open_ack::TYPE_URL => { + let domain_msg = MsgConnectionOpenAck::::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenAck(Box::new(domain_msg)))) + }, + conn_open_confirm::TYPE_URL => { + let domain_msg = + conn_open_confirm::MsgConnectionOpenConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics3Msg(ConnectionMsg::ConnectionOpenConfirm(domain_msg))) + }, - // ICS04 channel messages - chan_open_init::TYPE_URL => { - let domain_msg = chan_open_init::MsgChannelOpenInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit( - domain_msg, - ))) - } - chan_open_try::TYPE_URL => { - let domain_msg = chan_open_try::MsgChannelOpenTry::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry( - 
domain_msg, - ))) - } - chan_open_ack::TYPE_URL => { - let domain_msg = chan_open_ack::MsgChannelOpenAck::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck( - domain_msg, - ))) - } - chan_open_confirm::TYPE_URL => { - let domain_msg = - chan_open_confirm::MsgChannelOpenConfirm::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg( - ChannelMsg::ChannelOpenConfirm(domain_msg), - )) - } - chan_close_init::TYPE_URL => { - let domain_msg = chan_close_init::MsgChannelCloseInit::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit( - domain_msg, - ))) - } - chan_close_confirm::TYPE_URL => { - let domain_msg = - chan_close_confirm::MsgChannelCloseConfirm::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4ChannelMsg( - ChannelMsg::ChannelCloseConfirm(domain_msg), - )) - } - // ICS04 packet messages - recv_packet::TYPE_URL => { - let domain_msg = recv_packet::MsgRecvPacket::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket( - domain_msg, - ))) - } - acknowledgement::TYPE_URL => { - let domain_msg = acknowledgement::MsgAcknowledgement::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::AckPacket( - domain_msg, - ))) - } - timeout::TYPE_URL => { - let domain_msg = timeout::MsgTimeout::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket( - domain_msg, - ))) - } - timeout_on_close::TYPE_URL => { - let domain_msg = timeout_on_close::MsgTimeoutOnClose::decode_vec(&any_msg.value) - .map_err(Error::malformed_message_bytes)?; - Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket( - domain_msg, - ))) - 
} - _ => Err(Error::unknown_message_type_url(any_msg.type_url)), - } - } + // ICS04 channel messages + chan_open_init::TYPE_URL => { + let domain_msg = chan_open_init::MsgChannelOpenInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenInit(domain_msg))) + }, + chan_open_try::TYPE_URL => { + let domain_msg = chan_open_try::MsgChannelOpenTry::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenTry(domain_msg))) + }, + chan_open_ack::TYPE_URL => { + let domain_msg = chan_open_ack::MsgChannelOpenAck::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenAck(domain_msg))) + }, + chan_open_confirm::TYPE_URL => { + let domain_msg = + chan_open_confirm::MsgChannelOpenConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelOpenConfirm(domain_msg))) + }, + chan_close_init::TYPE_URL => { + let domain_msg = chan_close_init::MsgChannelCloseInit::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseInit(domain_msg))) + }, + chan_close_confirm::TYPE_URL => { + let domain_msg = + chan_close_confirm::MsgChannelCloseConfirm::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4ChannelMsg(ChannelMsg::ChannelCloseConfirm(domain_msg))) + }, + // ICS04 packet messages + recv_packet::TYPE_URL => { + let domain_msg = recv_packet::MsgRecvPacket::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::RecvPacket(domain_msg))) + }, + acknowledgement::TYPE_URL => { + let domain_msg = acknowledgement::MsgAcknowledgement::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + 
Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::AckPacket(domain_msg))) + }, + timeout::TYPE_URL => { + let domain_msg = timeout::MsgTimeout::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToPacket(domain_msg))) + }, + timeout_on_close::TYPE_URL => { + let domain_msg = timeout_on_close::MsgTimeoutOnClose::decode_vec(&any_msg.value) + .map_err(Error::malformed_message_bytes)?; + Ok(Ics26Envelope::Ics4PacketMsg(PacketMsg::ToClosePacket(domain_msg))) + }, + _ => Err(Error::unknown_message_type_url(any_msg.type_url)), + } + } } diff --git a/modules/src/events.rs b/modules/src/events.rs index 58e6d54e2b..f3c31f9309 100644 --- a/modules/src/events.rs +++ b/modules/src/events.rs @@ -1,95 +1,96 @@ use crate::prelude::*; use alloc::collections::btree_map::BTreeMap as HashMap; -use core::convert::{TryFrom, TryInto}; -use core::fmt; -use core::str::FromStr; +use core::{convert::TryFrom, fmt, str::FromStr}; use flex_error::{define_error, TraceError}; use prost::alloc::fmt::Formatter; use serde_derive::{Deserialize, Serialize}; -use tendermint::abci::tag::Tag; -use tendermint::abci::Event as AbciEvent; - -use crate::core::ics02_client::error as client_error; -use crate::core::ics02_client::events as ClientEvents; -use crate::core::ics02_client::events::NewBlock; -use crate::core::ics02_client::height::HeightError; -use crate::core::ics03_connection::events as ConnectionEvents; -use crate::core::ics03_connection::events::Attributes as ConnectionAttributes; -use crate::core::ics04_channel::error as channel_error; -use crate::core::ics04_channel::events as ChannelEvents; -use crate::core::ics04_channel::events::Attributes as ChannelAttributes; -use crate::core::ics04_channel::packet::Packet; -use crate::core::ics24_host::error::ValidationError; -use crate::core::ics26_routing::context::ModuleId; -use crate::timestamp::ParseTimestampError; -use crate::Height; +use tendermint::abci::{Event as AbciEvent, 
EventAttribute}; + +use crate::{ + core::{ + ics02_client::{ + error as client_error, events as ClientEvents, events::NewBlock, height::HeightError, + }, + ics03_connection::{ + events as ConnectionEvents, events::Attributes as ConnectionAttributes, + }, + ics04_channel::{ + error as channel_error, events as ChannelEvents, + events::Attributes as ChannelAttributes, packet::Packet, + }, + ics24_host::error::ValidationError, + ics26_routing::context::ModuleId, + }, + timestamp::ParseTimestampError, + Height, +}; define_error! { - Error { - Height - [ HeightError ] - | _ | { "error parsing height" }, - - Parse - [ ValidationError ] - | _ | { "parse error" }, - - Client - [ client_error::Error ] - | _ | { "ICS02 client error" }, - - Channel - [ channel_error::Error ] - | _ | { "channel error" }, - - Timestamp - [ ParseTimestampError ] - | _ | { "error parsing timestamp" }, - - MissingKey - { key: String } - | e | { format_args!("missing event key {}", e.key) }, - - Decode - [ TraceError ] - | _ | { "error decoding protobuf" }, - - SubtleEncoding - [ TraceError ] - | _ | { "error decoding hex" }, - - MissingActionString - | _ | { "missing action string" }, - - IncorrectEventType - { event: String } - | e | { format_args!("incorrect event type: {}", e.event) }, - - MalformedModuleEvent - { event: ModuleEvent } - | e | { format_args!("module event cannot use core event types: {:?}", e.event) }, - } + Error { + Height + [ HeightError ] + | _ | { "error parsing height" }, + + Parse + [ ValidationError ] + | _ | { "parse error" }, + + Client + [ client_error::Error ] + | _ | { "ICS02 client error" }, + + Channel + [ channel_error::Error ] + | _ | { "channel error" }, + + Timestamp + [ ParseTimestampError ] + | _ | { "error parsing timestamp" }, + + MissingKey + { key: String } + | e | { format_args!("missing event key {}", e.key) }, + + Decode + [ TraceError ] + | _ | { "error decoding protobuf" }, + + SubtleEncoding + [ TraceError ] + | _ | { "error decoding hex" }, + + 
MissingActionString + | _ | { "missing action string" }, + + IncorrectEventType + { event: String } + | e | { format_args!("incorrect event type: {}", e.event) }, + + MalformedModuleEvent + { event: ModuleEvent } + | e | { format_args!("module event cannot use core event types: {:?}", e.event) }, + } } /// Events whose data is not included in the app state and must be extracted using tendermint RPCs /// (i.e. /tx_search or /block_search) #[derive(Debug, Clone, Deserialize, Serialize)] pub enum WithBlockDataType { - CreateClient, - UpdateClient, - SendPacket, - WriteAck, + CreateClient, + UpdateClient, + SendPacket, + WriteAck, } impl WithBlockDataType { - pub fn as_str(&self) -> &'static str { - match *self { - WithBlockDataType::CreateClient => "create_client", - WithBlockDataType::UpdateClient => "update_client", - WithBlockDataType::SendPacket => "send_packet", - WithBlockDataType::WriteAck => "write_acknowledgement", - } - } + pub fn as_str(&self) -> &'static str { + match *self { + WithBlockDataType::CreateClient => "create_client", + WithBlockDataType::UpdateClient => "update_client", + WithBlockDataType::SendPacket => "send_packet", + WithBlockDataType::WriteAck => "write_acknowledgement", + } + } } const NEW_BLOCK_EVENT: &str = "new_block"; @@ -124,473 +125,406 @@ const TIMEOUT_ON_CLOSE_EVENT: &str = "timeout_packet_on_close"; /// Events types #[derive(Debug, Clone, Deserialize, Serialize)] pub enum IbcEventType { - NewBlock, - CreateClient, - UpdateClient, - UpgradeClient, - ClientMisbehaviour, - OpenInitConnection, - OpenTryConnection, - OpenAckConnection, - OpenConfirmConnection, - OpenInitChannel, - OpenTryChannel, - OpenAckChannel, - OpenConfirmChannel, - CloseInitChannel, - CloseConfirmChannel, - SendPacket, - ReceivePacket, - WriteAck, - AckPacket, - Timeout, - TimeoutOnClose, - AppModule, - Empty, - ChainError, + NewBlock, + CreateClient, + UpdateClient, + UpgradeClient, + ClientMisbehaviour, + OpenInitConnection, + OpenTryConnection, + 
OpenAckConnection, + OpenConfirmConnection, + OpenInitChannel, + OpenTryChannel, + OpenAckChannel, + OpenConfirmChannel, + CloseInitChannel, + CloseConfirmChannel, + SendPacket, + ReceivePacket, + WriteAck, + AckPacket, + Timeout, + TimeoutOnClose, + AppModule, + Empty, + ChainError, } impl IbcEventType { - pub fn as_str(&self) -> &'static str { - match *self { - IbcEventType::NewBlock => NEW_BLOCK_EVENT, - IbcEventType::CreateClient => CREATE_CLIENT_EVENT, - IbcEventType::UpdateClient => UPDATE_CLIENT_EVENT, - IbcEventType::UpgradeClient => UPGRADE_CLIENT_EVENT, - IbcEventType::ClientMisbehaviour => CLIENT_MISBEHAVIOUR_EVENT, - IbcEventType::OpenInitConnection => CONNECTION_INIT_EVENT, - IbcEventType::OpenTryConnection => CONNECTION_TRY_EVENT, - IbcEventType::OpenAckConnection => CONNECTION_ACK_EVENT, - IbcEventType::OpenConfirmConnection => CONNECTION_CONFIRM_EVENT, - IbcEventType::OpenInitChannel => CHANNEL_OPEN_INIT_EVENT, - IbcEventType::OpenTryChannel => CHANNEL_OPEN_TRY_EVENT, - IbcEventType::OpenAckChannel => CHANNEL_OPEN_ACK_EVENT, - IbcEventType::OpenConfirmChannel => CHANNEL_OPEN_CONFIRM_EVENT, - IbcEventType::CloseInitChannel => CHANNEL_CLOSE_INIT_EVENT, - IbcEventType::CloseConfirmChannel => CHANNEL_CLOSE_CONFIRM_EVENT, - IbcEventType::SendPacket => SEND_PACKET_EVENT, - IbcEventType::ReceivePacket => RECEIVE_PACKET_EVENT, - IbcEventType::WriteAck => WRITE_ACK_EVENT, - IbcEventType::AckPacket => ACK_PACKET_EVENT, - IbcEventType::Timeout => TIMEOUT_EVENT, - IbcEventType::TimeoutOnClose => TIMEOUT_ON_CLOSE_EVENT, - IbcEventType::AppModule => APP_MODULE_EVENT, - IbcEventType::Empty => EMPTY_EVENT, - IbcEventType::ChainError => CHAIN_ERROR_EVENT, - } - } + pub fn as_str(&self) -> &'static str { + match *self { + IbcEventType::NewBlock => NEW_BLOCK_EVENT, + IbcEventType::CreateClient => CREATE_CLIENT_EVENT, + IbcEventType::UpdateClient => UPDATE_CLIENT_EVENT, + IbcEventType::UpgradeClient => UPGRADE_CLIENT_EVENT, + IbcEventType::ClientMisbehaviour => 
CLIENT_MISBEHAVIOUR_EVENT, + IbcEventType::OpenInitConnection => CONNECTION_INIT_EVENT, + IbcEventType::OpenTryConnection => CONNECTION_TRY_EVENT, + IbcEventType::OpenAckConnection => CONNECTION_ACK_EVENT, + IbcEventType::OpenConfirmConnection => CONNECTION_CONFIRM_EVENT, + IbcEventType::OpenInitChannel => CHANNEL_OPEN_INIT_EVENT, + IbcEventType::OpenTryChannel => CHANNEL_OPEN_TRY_EVENT, + IbcEventType::OpenAckChannel => CHANNEL_OPEN_ACK_EVENT, + IbcEventType::OpenConfirmChannel => CHANNEL_OPEN_CONFIRM_EVENT, + IbcEventType::CloseInitChannel => CHANNEL_CLOSE_INIT_EVENT, + IbcEventType::CloseConfirmChannel => CHANNEL_CLOSE_CONFIRM_EVENT, + IbcEventType::SendPacket => SEND_PACKET_EVENT, + IbcEventType::ReceivePacket => RECEIVE_PACKET_EVENT, + IbcEventType::WriteAck => WRITE_ACK_EVENT, + IbcEventType::AckPacket => ACK_PACKET_EVENT, + IbcEventType::Timeout => TIMEOUT_EVENT, + IbcEventType::TimeoutOnClose => TIMEOUT_ON_CLOSE_EVENT, + IbcEventType::AppModule => APP_MODULE_EVENT, + IbcEventType::Empty => EMPTY_EVENT, + IbcEventType::ChainError => CHAIN_ERROR_EVENT, + } + } } impl FromStr for IbcEventType { - type Err = Error; - - fn from_str(s: &str) -> Result { - match s { - NEW_BLOCK_EVENT => Ok(IbcEventType::NewBlock), - CREATE_CLIENT_EVENT => Ok(IbcEventType::CreateClient), - UPDATE_CLIENT_EVENT => Ok(IbcEventType::UpdateClient), - UPGRADE_CLIENT_EVENT => Ok(IbcEventType::UpgradeClient), - CLIENT_MISBEHAVIOUR_EVENT => Ok(IbcEventType::ClientMisbehaviour), - CONNECTION_INIT_EVENT => Ok(IbcEventType::OpenInitConnection), - CONNECTION_TRY_EVENT => Ok(IbcEventType::OpenTryConnection), - CONNECTION_ACK_EVENT => Ok(IbcEventType::OpenAckConnection), - CONNECTION_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmConnection), - CHANNEL_OPEN_INIT_EVENT => Ok(IbcEventType::OpenInitChannel), - CHANNEL_OPEN_TRY_EVENT => Ok(IbcEventType::OpenTryChannel), - CHANNEL_OPEN_ACK_EVENT => Ok(IbcEventType::OpenAckChannel), - CHANNEL_OPEN_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmChannel), - 
CHANNEL_CLOSE_INIT_EVENT => Ok(IbcEventType::CloseInitChannel), - CHANNEL_CLOSE_CONFIRM_EVENT => Ok(IbcEventType::CloseConfirmChannel), - SEND_PACKET_EVENT => Ok(IbcEventType::SendPacket), - RECEIVE_PACKET_EVENT => Ok(IbcEventType::ReceivePacket), - WRITE_ACK_EVENT => Ok(IbcEventType::WriteAck), - ACK_PACKET_EVENT => Ok(IbcEventType::AckPacket), - TIMEOUT_EVENT => Ok(IbcEventType::Timeout), - TIMEOUT_ON_CLOSE_EVENT => Ok(IbcEventType::TimeoutOnClose), - EMPTY_EVENT => Ok(IbcEventType::Empty), - CHAIN_ERROR_EVENT => Ok(IbcEventType::ChainError), - // from_str() for `APP_MODULE_EVENT` MUST fail because a `ModuleEvent`'s type isn't constant - _ => Err(Error::incorrect_event_type(s.to_string())), - } - } + type Err = Error; + + fn from_str(s: &str) -> Result { + match s { + NEW_BLOCK_EVENT => Ok(IbcEventType::NewBlock), + CREATE_CLIENT_EVENT => Ok(IbcEventType::CreateClient), + UPDATE_CLIENT_EVENT => Ok(IbcEventType::UpdateClient), + UPGRADE_CLIENT_EVENT => Ok(IbcEventType::UpgradeClient), + CLIENT_MISBEHAVIOUR_EVENT => Ok(IbcEventType::ClientMisbehaviour), + CONNECTION_INIT_EVENT => Ok(IbcEventType::OpenInitConnection), + CONNECTION_TRY_EVENT => Ok(IbcEventType::OpenTryConnection), + CONNECTION_ACK_EVENT => Ok(IbcEventType::OpenAckConnection), + CONNECTION_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmConnection), + CHANNEL_OPEN_INIT_EVENT => Ok(IbcEventType::OpenInitChannel), + CHANNEL_OPEN_TRY_EVENT => Ok(IbcEventType::OpenTryChannel), + CHANNEL_OPEN_ACK_EVENT => Ok(IbcEventType::OpenAckChannel), + CHANNEL_OPEN_CONFIRM_EVENT => Ok(IbcEventType::OpenConfirmChannel), + CHANNEL_CLOSE_INIT_EVENT => Ok(IbcEventType::CloseInitChannel), + CHANNEL_CLOSE_CONFIRM_EVENT => Ok(IbcEventType::CloseConfirmChannel), + SEND_PACKET_EVENT => Ok(IbcEventType::SendPacket), + RECEIVE_PACKET_EVENT => Ok(IbcEventType::ReceivePacket), + WRITE_ACK_EVENT => Ok(IbcEventType::WriteAck), + ACK_PACKET_EVENT => Ok(IbcEventType::AckPacket), + TIMEOUT_EVENT => Ok(IbcEventType::Timeout), + 
TIMEOUT_ON_CLOSE_EVENT => Ok(IbcEventType::TimeoutOnClose), + EMPTY_EVENT => Ok(IbcEventType::Empty), + CHAIN_ERROR_EVENT => Ok(IbcEventType::ChainError), + // from_str() for `APP_MODULE_EVENT` MUST fail because a `ModuleEvent`'s type isn't + // constant + _ => Err(Error::incorrect_event_type(s.to_string())), + } + } } /// Events created by the IBC component of a chain, destined for a relayer. #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub enum IbcEvent { - NewBlock(NewBlock), - - CreateClient(ClientEvents::CreateClient), - UpdateClient(ClientEvents::UpdateClient), - UpgradeClient(ClientEvents::UpgradeClient), - ClientMisbehaviour(ClientEvents::ClientMisbehaviour), - - OpenInitConnection(ConnectionEvents::OpenInit), - OpenTryConnection(ConnectionEvents::OpenTry), - OpenAckConnection(ConnectionEvents::OpenAck), - OpenConfirmConnection(ConnectionEvents::OpenConfirm), - - OpenInitChannel(ChannelEvents::OpenInit), - OpenTryChannel(ChannelEvents::OpenTry), - OpenAckChannel(ChannelEvents::OpenAck), - OpenConfirmChannel(ChannelEvents::OpenConfirm), - CloseInitChannel(ChannelEvents::CloseInit), - CloseConfirmChannel(ChannelEvents::CloseConfirm), - - SendPacket(ChannelEvents::SendPacket), - ReceivePacket(ChannelEvents::ReceivePacket), - WriteAcknowledgement(ChannelEvents::WriteAcknowledgement), - AcknowledgePacket(ChannelEvents::AcknowledgePacket), - TimeoutPacket(ChannelEvents::TimeoutPacket), - TimeoutOnClosePacket(ChannelEvents::TimeoutOnClosePacket), - - AppModule(ModuleEvent), - - Empty(String), // Special event, signifying empty response - ChainError(String), // Special event, signifying an error on CheckTx or DeliverTx + NewBlock(NewBlock), + + CreateClient(ClientEvents::CreateClient), + UpdateClient(ClientEvents::UpdateClient), + UpgradeClient(ClientEvents::UpgradeClient), + ClientMisbehaviour(ClientEvents::ClientMisbehaviour), + + OpenInitConnection(ConnectionEvents::OpenInit), + OpenTryConnection(ConnectionEvents::OpenTry), + 
OpenAckConnection(ConnectionEvents::OpenAck), + OpenConfirmConnection(ConnectionEvents::OpenConfirm), + + OpenInitChannel(ChannelEvents::OpenInit), + OpenTryChannel(ChannelEvents::OpenTry), + OpenAckChannel(ChannelEvents::OpenAck), + OpenConfirmChannel(ChannelEvents::OpenConfirm), + CloseInitChannel(ChannelEvents::CloseInit), + CloseConfirmChannel(ChannelEvents::CloseConfirm), + + SendPacket(ChannelEvents::SendPacket), + ReceivePacket(ChannelEvents::ReceivePacket), + WriteAcknowledgement(ChannelEvents::WriteAcknowledgement), + AcknowledgePacket(ChannelEvents::AcknowledgePacket), + TimeoutPacket(ChannelEvents::TimeoutPacket), + TimeoutOnClosePacket(ChannelEvents::TimeoutOnClosePacket), + + AppModule(ModuleEvent), + + Empty(String), // Special event, signifying empty response + ChainError(String), // Special event, signifying an error on CheckTx or DeliverTx } impl Default for IbcEvent { - fn default() -> Self { - Self::Empty("".to_string()) - } + fn default() -> Self { + Self::Empty("".to_string()) + } } /// For use in debug messages pub struct PrettyEvents<'a>(pub &'a [IbcEvent]); impl<'a> fmt::Display for PrettyEvents<'a> { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - writeln!(f, "events:")?; - for v in self.0 { - writeln!(f, "\t{}", v)?; - } - Ok(()) - } + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + writeln!(f, "events:")?; + for v in self.0 { + writeln!(f, "\t{}", v)?; + } + Ok(()) + } } impl fmt::Display for IbcEvent { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - IbcEvent::NewBlock(ev) => write!(f, "NewBlock({})", ev.height), - - IbcEvent::CreateClient(ev) => write!(f, "CreateClientEv({})", ev), - IbcEvent::UpdateClient(ev) => write!(f, "UpdateClientEv({})", ev), - IbcEvent::UpgradeClient(ev) => write!(f, "UpgradeClientEv({:?})", ev), - IbcEvent::ClientMisbehaviour(ev) => write!(f, "ClientMisbehaviourEv({:?})", ev), - - IbcEvent::OpenInitConnection(ev) => write!(f, "OpenInitConnectionEv({:?})", ev), - 
IbcEvent::OpenTryConnection(ev) => write!(f, "OpenTryConnectionEv({:?})", ev), - IbcEvent::OpenAckConnection(ev) => write!(f, "OpenAckConnectionEv({:?})", ev), - IbcEvent::OpenConfirmConnection(ev) => write!(f, "OpenConfirmConnectionEv({:?})", ev), - - IbcEvent::OpenInitChannel(ev) => write!(f, "OpenInitChannelEv({:?})", ev), - IbcEvent::OpenTryChannel(ev) => write!(f, "OpenTryChannelEv({:?})", ev), - IbcEvent::OpenAckChannel(ev) => write!(f, "OpenAckChannelEv({:?})", ev), - IbcEvent::OpenConfirmChannel(ev) => write!(f, "OpenConfirmChannelEv({:?})", ev), - IbcEvent::CloseInitChannel(ev) => write!(f, "CloseInitChannelEv({})", ev), - IbcEvent::CloseConfirmChannel(ev) => write!(f, "CloseConfirmChannelEv({:?})", ev), - - IbcEvent::SendPacket(ev) => write!(f, "SendPacketEv({})", ev), - IbcEvent::ReceivePacket(ev) => write!(f, "ReceivePacketEv({})", ev), - IbcEvent::WriteAcknowledgement(ev) => write!(f, "WriteAcknowledgementEv({})", ev), - IbcEvent::AcknowledgePacket(ev) => write!(f, "AcknowledgePacketEv({})", ev), - IbcEvent::TimeoutPacket(ev) => write!(f, "TimeoutPacketEv({})", ev), - IbcEvent::TimeoutOnClosePacket(ev) => write!(f, "TimeoutOnClosePacketEv({})", ev), - - IbcEvent::AppModule(ev) => write!(f, "AppModuleEv({:?})", ev), - - IbcEvent::Empty(ev) => write!(f, "EmptyEv({})", ev), - IbcEvent::ChainError(ev) => write!(f, "ChainErrorEv({})", ev), - } - } -} - -impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(event: IbcEvent) -> Result { - Ok(match event { - IbcEvent::CreateClient(event) => event.into(), - IbcEvent::UpdateClient(event) => event.into(), - IbcEvent::UpgradeClient(event) => event.into(), - IbcEvent::ClientMisbehaviour(event) => event.into(), - IbcEvent::OpenInitConnection(event) => event.into(), - IbcEvent::OpenTryConnection(event) => event.into(), - IbcEvent::OpenAckConnection(event) => event.into(), - IbcEvent::OpenConfirmConnection(event) => event.into(), - IbcEvent::OpenInitChannel(event) => event.into(), - 
IbcEvent::OpenTryChannel(event) => event.into(), - IbcEvent::OpenAckChannel(event) => event.into(), - IbcEvent::OpenConfirmChannel(event) => event.into(), - IbcEvent::CloseInitChannel(event) => event.into(), - IbcEvent::CloseConfirmChannel(event) => event.into(), - IbcEvent::SendPacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::ReceivePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::WriteAcknowledgement(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::AcknowledgePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::TimeoutPacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::TimeoutOnClosePacket(event) => event.try_into().map_err(Error::channel)?, - IbcEvent::AppModule(event) => event.try_into()?, - IbcEvent::NewBlock(_) | IbcEvent::Empty(_) | IbcEvent::ChainError(_) => { - return Err(Error::incorrect_event_type(event.to_string())) - } - }) - } -} - -// This is tendermint specific -pub fn from_tx_response_event(height: Height, event: &tendermint::abci::Event) -> Option { - // Return the first hit we find - if let Some(mut client_res) = ClientEvents::try_from_tx(event) { - client_res.set_height(height); - Some(client_res) - } else if let Some(mut conn_res) = ConnectionEvents::try_from_tx(event) { - conn_res.set_height(height); - Some(conn_res) - } else if let Some(mut chan_res) = ChannelEvents::try_from_tx(event) { - chan_res.set_height(height); - Some(chan_res) - } else { - None - } + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + IbcEvent::NewBlock(ev) => write!(f, "NewBlock({})", ev.height), + + IbcEvent::CreateClient(ev) => write!(f, "CreateClientEv({})", ev), + IbcEvent::UpdateClient(ev) => write!(f, "UpdateClientEv({})", ev), + IbcEvent::UpgradeClient(ev) => write!(f, "UpgradeClientEv({:?})", ev), + IbcEvent::ClientMisbehaviour(ev) => write!(f, "ClientMisbehaviourEv({:?})", ev), + + IbcEvent::OpenInitConnection(ev) => write!(f, 
"OpenInitConnectionEv({:?})", ev), + IbcEvent::OpenTryConnection(ev) => write!(f, "OpenTryConnectionEv({:?})", ev), + IbcEvent::OpenAckConnection(ev) => write!(f, "OpenAckConnectionEv({:?})", ev), + IbcEvent::OpenConfirmConnection(ev) => write!(f, "OpenConfirmConnectionEv({:?})", ev), + + IbcEvent::OpenInitChannel(ev) => write!(f, "OpenInitChannelEv({:?})", ev), + IbcEvent::OpenTryChannel(ev) => write!(f, "OpenTryChannelEv({:?})", ev), + IbcEvent::OpenAckChannel(ev) => write!(f, "OpenAckChannelEv({:?})", ev), + IbcEvent::OpenConfirmChannel(ev) => write!(f, "OpenConfirmChannelEv({:?})", ev), + IbcEvent::CloseInitChannel(ev) => write!(f, "CloseInitChannelEv({})", ev), + IbcEvent::CloseConfirmChannel(ev) => write!(f, "CloseConfirmChannelEv({:?})", ev), + + IbcEvent::SendPacket(ev) => write!(f, "SendPacketEv({})", ev), + IbcEvent::ReceivePacket(ev) => write!(f, "ReceivePacketEv({})", ev), + IbcEvent::WriteAcknowledgement(ev) => write!(f, "WriteAcknowledgementEv({})", ev), + IbcEvent::AcknowledgePacket(ev) => write!(f, "AcknowledgePacketEv({})", ev), + IbcEvent::TimeoutPacket(ev) => write!(f, "TimeoutPacketEv({})", ev), + IbcEvent::TimeoutOnClosePacket(ev) => write!(f, "TimeoutOnClosePacketEv({})", ev), + + IbcEvent::AppModule(ev) => write!(f, "AppModuleEv({:?})", ev), + + IbcEvent::Empty(ev) => write!(f, "EmptyEv({})", ev), + IbcEvent::ChainError(ev) => write!(f, "ChainErrorEv({})", ev), + } + } } impl IbcEvent { - pub fn to_json(&self) -> String { - match serde_json::to_string(self) { - Ok(value) => value, - Err(_) => format!("{:?}", self), // Fallback to debug printing - } - } - - pub fn height(&self) -> Height { - match self { - IbcEvent::NewBlock(bl) => bl.height(), - IbcEvent::CreateClient(ev) => ev.height(), - IbcEvent::UpdateClient(ev) => ev.height(), - IbcEvent::UpgradeClient(ev) => ev.height(), - IbcEvent::ClientMisbehaviour(ev) => ev.height(), - IbcEvent::OpenInitConnection(ev) => ev.height(), - IbcEvent::OpenTryConnection(ev) => ev.height(), - 
IbcEvent::OpenAckConnection(ev) => ev.height(), - IbcEvent::OpenConfirmConnection(ev) => ev.height(), - IbcEvent::OpenInitChannel(ev) => ev.height(), - IbcEvent::OpenTryChannel(ev) => ev.height(), - IbcEvent::OpenAckChannel(ev) => ev.height(), - IbcEvent::OpenConfirmChannel(ev) => ev.height(), - IbcEvent::CloseInitChannel(ev) => ev.height(), - IbcEvent::CloseConfirmChannel(ev) => ev.height(), - IbcEvent::SendPacket(ev) => ev.height(), - IbcEvent::ReceivePacket(ev) => ev.height(), - IbcEvent::WriteAcknowledgement(ev) => ev.height(), - IbcEvent::AcknowledgePacket(ev) => ev.height(), - IbcEvent::TimeoutPacket(ev) => ev.height(), - IbcEvent::TimeoutOnClosePacket(ev) => ev.height(), - _ => unimplemented!(), - } - } - - pub fn set_height(&mut self, height: Height) { - match self { - IbcEvent::NewBlock(ev) => ev.set_height(height), - IbcEvent::CreateClient(ev) => ev.set_height(height), - IbcEvent::UpdateClient(ev) => ev.set_height(height), - IbcEvent::UpgradeClient(ev) => ev.set_height(height), - IbcEvent::ClientMisbehaviour(ev) => ev.set_height(height), - IbcEvent::OpenInitConnection(ev) => ev.set_height(height), - IbcEvent::OpenTryConnection(ev) => ev.set_height(height), - IbcEvent::OpenAckConnection(ev) => ev.set_height(height), - IbcEvent::OpenConfirmConnection(ev) => ev.set_height(height), - IbcEvent::OpenInitChannel(ev) => ev.set_height(height), - IbcEvent::OpenTryChannel(ev) => ev.set_height(height), - IbcEvent::OpenAckChannel(ev) => ev.set_height(height), - IbcEvent::OpenConfirmChannel(ev) => ev.set_height(height), - IbcEvent::CloseInitChannel(ev) => ev.set_height(height), - IbcEvent::CloseConfirmChannel(ev) => ev.set_height(height), - IbcEvent::SendPacket(ev) => ev.set_height(height), - IbcEvent::ReceivePacket(ev) => ev.set_height(height), - IbcEvent::WriteAcknowledgement(ev) => ev.set_height(height), - IbcEvent::AcknowledgePacket(ev) => ev.set_height(height), - IbcEvent::TimeoutPacket(ev) => ev.set_height(height), - _ => unimplemented!(), - } - } - - pub fn 
event_type(&self) -> IbcEventType { - match self { - IbcEvent::NewBlock(_) => IbcEventType::NewBlock, - IbcEvent::CreateClient(_) => IbcEventType::CreateClient, - IbcEvent::UpdateClient(_) => IbcEventType::UpdateClient, - IbcEvent::ClientMisbehaviour(_) => IbcEventType::ClientMisbehaviour, - IbcEvent::UpgradeClient(_) => IbcEventType::UpgradeClient, - IbcEvent::OpenInitConnection(_) => IbcEventType::OpenInitConnection, - IbcEvent::OpenTryConnection(_) => IbcEventType::OpenTryConnection, - IbcEvent::OpenAckConnection(_) => IbcEventType::OpenAckConnection, - IbcEvent::OpenConfirmConnection(_) => IbcEventType::OpenConfirmConnection, - IbcEvent::OpenInitChannel(_) => IbcEventType::OpenInitChannel, - IbcEvent::OpenTryChannel(_) => IbcEventType::OpenTryChannel, - IbcEvent::OpenAckChannel(_) => IbcEventType::OpenAckChannel, - IbcEvent::OpenConfirmChannel(_) => IbcEventType::OpenConfirmChannel, - IbcEvent::CloseInitChannel(_) => IbcEventType::CloseInitChannel, - IbcEvent::CloseConfirmChannel(_) => IbcEventType::CloseConfirmChannel, - IbcEvent::SendPacket(_) => IbcEventType::SendPacket, - IbcEvent::ReceivePacket(_) => IbcEventType::ReceivePacket, - IbcEvent::WriteAcknowledgement(_) => IbcEventType::WriteAck, - IbcEvent::AcknowledgePacket(_) => IbcEventType::AckPacket, - IbcEvent::TimeoutPacket(_) => IbcEventType::Timeout, - IbcEvent::TimeoutOnClosePacket(_) => IbcEventType::TimeoutOnClose, - IbcEvent::AppModule(_) => IbcEventType::AppModule, - IbcEvent::Empty(_) => IbcEventType::Empty, - IbcEvent::ChainError(_) => IbcEventType::ChainError, - } - } - - pub fn channel_attributes(self) -> Option { - match self { - IbcEvent::OpenInitChannel(ev) => Some(ev.into()), - IbcEvent::OpenTryChannel(ev) => Some(ev.into()), - IbcEvent::OpenAckChannel(ev) => Some(ev.into()), - IbcEvent::OpenConfirmChannel(ev) => Some(ev.into()), - _ => None, - } - } - - pub fn connection_attributes(&self) -> Option<&ConnectionAttributes> { - match self { - IbcEvent::OpenInitConnection(ev) => 
Some(ev.attributes()), - IbcEvent::OpenTryConnection(ev) => Some(ev.attributes()), - IbcEvent::OpenAckConnection(ev) => Some(ev.attributes()), - IbcEvent::OpenConfirmConnection(ev) => Some(ev.attributes()), - _ => None, - } - } - - pub fn packet(&self) -> Option<&Packet> { - match self { - IbcEvent::SendPacket(ev) => Some(&ev.packet), - IbcEvent::ReceivePacket(ev) => Some(&ev.packet), - IbcEvent::WriteAcknowledgement(ev) => Some(&ev.packet), - IbcEvent::AcknowledgePacket(ev) => Some(&ev.packet), - IbcEvent::TimeoutPacket(ev) => Some(&ev.packet), - IbcEvent::TimeoutOnClosePacket(ev) => Some(&ev.packet), - _ => None, - } - } - - pub fn ack(&self) -> Option<&[u8]> { - match self { - IbcEvent::WriteAcknowledgement(ev) => Some(&ev.ack), - _ => None, - } - } + pub fn to_json(&self) -> String { + match serde_json::to_string(self) { + Ok(value) => value, + Err(_) => format!("{:?}", self), // Fallback to debug printing + } + } + + pub fn height(&self) -> Height { + match self { + IbcEvent::NewBlock(bl) => bl.height(), + IbcEvent::CreateClient(ev) => ev.height(), + IbcEvent::UpdateClient(ev) => ev.height(), + IbcEvent::UpgradeClient(ev) => ev.height(), + IbcEvent::ClientMisbehaviour(ev) => ev.height(), + IbcEvent::OpenInitConnection(ev) => ev.height(), + IbcEvent::OpenTryConnection(ev) => ev.height(), + IbcEvent::OpenAckConnection(ev) => ev.height(), + IbcEvent::OpenConfirmConnection(ev) => ev.height(), + IbcEvent::OpenInitChannel(ev) => ev.height(), + IbcEvent::OpenTryChannel(ev) => ev.height(), + IbcEvent::OpenAckChannel(ev) => ev.height(), + IbcEvent::OpenConfirmChannel(ev) => ev.height(), + IbcEvent::CloseInitChannel(ev) => ev.height(), + IbcEvent::CloseConfirmChannel(ev) => ev.height(), + IbcEvent::SendPacket(ev) => ev.height(), + IbcEvent::ReceivePacket(ev) => ev.height(), + IbcEvent::WriteAcknowledgement(ev) => ev.height(), + IbcEvent::AcknowledgePacket(ev) => ev.height(), + IbcEvent::TimeoutPacket(ev) => ev.height(), + IbcEvent::TimeoutOnClosePacket(ev) => 
ev.height(), + _ => unimplemented!(), + } + } + + pub fn set_height(&mut self, height: Height) { + match self { + IbcEvent::NewBlock(ev) => ev.set_height(height), + IbcEvent::CreateClient(ev) => ev.set_height(height), + IbcEvent::UpdateClient(ev) => ev.set_height(height), + IbcEvent::UpgradeClient(ev) => ev.set_height(height), + IbcEvent::ClientMisbehaviour(ev) => ev.set_height(height), + IbcEvent::OpenInitConnection(ev) => ev.set_height(height), + IbcEvent::OpenTryConnection(ev) => ev.set_height(height), + IbcEvent::OpenAckConnection(ev) => ev.set_height(height), + IbcEvent::OpenConfirmConnection(ev) => ev.set_height(height), + IbcEvent::OpenInitChannel(ev) => ev.set_height(height), + IbcEvent::OpenTryChannel(ev) => ev.set_height(height), + IbcEvent::OpenAckChannel(ev) => ev.set_height(height), + IbcEvent::OpenConfirmChannel(ev) => ev.set_height(height), + IbcEvent::CloseInitChannel(ev) => ev.set_height(height), + IbcEvent::CloseConfirmChannel(ev) => ev.set_height(height), + IbcEvent::SendPacket(ev) => ev.set_height(height), + IbcEvent::ReceivePacket(ev) => ev.set_height(height), + IbcEvent::WriteAcknowledgement(ev) => ev.set_height(height), + IbcEvent::AcknowledgePacket(ev) => ev.set_height(height), + IbcEvent::TimeoutPacket(ev) => ev.set_height(height), + _ => unimplemented!(), + } + } + + pub fn event_type(&self) -> IbcEventType { + match self { + IbcEvent::NewBlock(_) => IbcEventType::NewBlock, + IbcEvent::CreateClient(_) => IbcEventType::CreateClient, + IbcEvent::UpdateClient(_) => IbcEventType::UpdateClient, + IbcEvent::ClientMisbehaviour(_) => IbcEventType::ClientMisbehaviour, + IbcEvent::UpgradeClient(_) => IbcEventType::UpgradeClient, + IbcEvent::OpenInitConnection(_) => IbcEventType::OpenInitConnection, + IbcEvent::OpenTryConnection(_) => IbcEventType::OpenTryConnection, + IbcEvent::OpenAckConnection(_) => IbcEventType::OpenAckConnection, + IbcEvent::OpenConfirmConnection(_) => IbcEventType::OpenConfirmConnection, + IbcEvent::OpenInitChannel(_) => 
IbcEventType::OpenInitChannel, + IbcEvent::OpenTryChannel(_) => IbcEventType::OpenTryChannel, + IbcEvent::OpenAckChannel(_) => IbcEventType::OpenAckChannel, + IbcEvent::OpenConfirmChannel(_) => IbcEventType::OpenConfirmChannel, + IbcEvent::CloseInitChannel(_) => IbcEventType::CloseInitChannel, + IbcEvent::CloseConfirmChannel(_) => IbcEventType::CloseConfirmChannel, + IbcEvent::SendPacket(_) => IbcEventType::SendPacket, + IbcEvent::ReceivePacket(_) => IbcEventType::ReceivePacket, + IbcEvent::WriteAcknowledgement(_) => IbcEventType::WriteAck, + IbcEvent::AcknowledgePacket(_) => IbcEventType::AckPacket, + IbcEvent::TimeoutPacket(_) => IbcEventType::Timeout, + IbcEvent::TimeoutOnClosePacket(_) => IbcEventType::TimeoutOnClose, + IbcEvent::AppModule(_) => IbcEventType::AppModule, + IbcEvent::Empty(_) => IbcEventType::Empty, + IbcEvent::ChainError(_) => IbcEventType::ChainError, + } + } + + pub fn channel_attributes(self) -> Option { + match self { + IbcEvent::OpenInitChannel(ev) => Some(ev.into()), + IbcEvent::OpenTryChannel(ev) => Some(ev.into()), + IbcEvent::OpenAckChannel(ev) => Some(ev.into()), + IbcEvent::OpenConfirmChannel(ev) => Some(ev.into()), + _ => None, + } + } + + pub fn connection_attributes(&self) -> Option<&ConnectionAttributes> { + match self { + IbcEvent::OpenInitConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenTryConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenAckConnection(ev) => Some(ev.attributes()), + IbcEvent::OpenConfirmConnection(ev) => Some(ev.attributes()), + _ => None, + } + } + + pub fn packet(&self) -> Option<&Packet> { + match self { + IbcEvent::SendPacket(ev) => Some(&ev.packet), + IbcEvent::ReceivePacket(ev) => Some(&ev.packet), + IbcEvent::WriteAcknowledgement(ev) => Some(&ev.packet), + IbcEvent::AcknowledgePacket(ev) => Some(&ev.packet), + IbcEvent::TimeoutPacket(ev) => Some(&ev.packet), + IbcEvent::TimeoutOnClosePacket(ev) => Some(&ev.packet), + _ => None, + } + } + + pub fn ack(&self) -> Option<&[u8]> { + match self 
{ + IbcEvent::WriteAcknowledgement(ev) => Some(&ev.ack), + _ => None, + } + } } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub struct ModuleEvent { - pub kind: String, - pub module_name: ModuleId, - pub attributes: Vec, + pub kind: String, + pub module_name: ModuleId, + pub attributes: Vec, } impl TryFrom for AbciEvent { - type Error = Error; - - fn try_from(event: ModuleEvent) -> Result { - if IbcEventType::from_str(event.kind.as_str()).is_ok() { - return Err(Error::malformed_module_event(event)); - } - - let attributes = event.attributes.into_iter().map(Into::into).collect(); - Ok(AbciEvent { - type_str: event.kind, - attributes, - }) - } + type Error = Error; + + fn try_from(event: ModuleEvent) -> Result { + if IbcEventType::from_str(event.kind.as_str()).is_ok() { + return Err(Error::malformed_module_event(event)) + } + + let attributes = event.attributes.into_iter().map(Into::into).collect(); + Ok(AbciEvent { kind: event.kind, attributes }) + } } impl From for IbcEvent { - fn from(e: ModuleEvent) -> Self { - IbcEvent::AppModule(e) - } + fn from(e: ModuleEvent) -> Self { + IbcEvent::AppModule(e) + } } #[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] pub struct ModuleEventAttribute { - pub key: String, - pub value: String, + pub key: String, + pub value: String, } impl From<(K, V)> for ModuleEventAttribute { - fn from((k, v): (K, V)) -> Self { - Self { - key: k.to_string(), - value: v.to_string(), - } - } + fn from((k, v): (K, V)) -> Self { + Self { key: k.to_string(), value: v.to_string() } + } } -impl From for Tag { - fn from(attr: ModuleEventAttribute) -> Self { - Self { - key: attr - .key - .parse() - .expect("Key::from_str() impl is infallible"), - value: attr - .key - .parse() - .expect("Value::from_str() impl is infallible"), - } - } +impl From for EventAttribute { + fn from(attr: ModuleEventAttribute) -> Self { + Self { + key: attr.key.parse().expect("Key::from_str() impl is infallible"), + value: 
attr.key.parse().expect("Value::from_str() impl is infallible"), + index: false, + } + } } #[derive(Debug, Clone, Serialize)] pub struct RawObject<'a> { - pub height: Height, - pub action: String, - pub idx: usize, - pub events: &'a HashMap>, + pub height: Height, + pub action: String, + pub idx: usize, + pub events: &'a HashMap>, } impl<'a> RawObject<'a> { - pub fn new( - height: Height, - action: String, - idx: usize, - events: &'a HashMap>, - ) -> RawObject<'a> { - RawObject { - height, - action, - idx, - events, - } - } + pub fn new( + height: Height, + action: String, + idx: usize, + events: &'a HashMap>, + ) -> RawObject<'a> { + RawObject { height, action, idx, events } + } } pub fn extract_events( - events: &HashMap>, - action_string: &str, + events: &HashMap>, + action_string: &str, ) -> Result<(), Error> { - if let Some(message_action) = events.get("message.action") { - if message_action.contains(&action_string.to_owned()) { - return Ok(()); - } - return Err(Error::missing_action_string()); - } - Err(Error::incorrect_event_type(action_string.to_string())) + if let Some(message_action) = events.get("message.action") { + if message_action.contains(&action_string.to_owned()) { + return Ok(()) + } + return Err(Error::missing_action_string()) + } + Err(Error::incorrect_event_type(action_string.to_string())) } pub fn extract_attribute(object: &RawObject<'_>, key: &str) -> Result { - let value = object - .events - .get(key) - .ok_or_else(|| Error::missing_key(key.to_string()))?[object.idx] - .clone(); + let value = object.events.get(key).ok_or_else(|| Error::missing_key(key.to_string()))? 
+ [object.idx] + .clone(); - Ok(value) + Ok(value) } pub fn maybe_extract_attribute(object: &RawObject<'_>, key: &str) -> Option { - object.events.get(key).map(|tags| tags[object.idx].clone()) + object.events.get(key).map(|tags| tags[object.idx].clone()) } diff --git a/modules/src/handler.rs b/modules/src/handler.rs index f8bf203ebb..0325101934 100644 --- a/modules/src/handler.rs +++ b/modules/src/handler.rs @@ -1,79 +1,64 @@ -use crate::events::IbcEvent; -use crate::prelude::*; +use crate::{events::IbcEvent, prelude::*}; use core::marker::PhantomData; pub type HandlerResult = Result, E>; #[derive(Clone, Debug)] pub struct HandlerOutput { - pub result: T, - pub log: Vec, - pub events: Vec, + pub result: T, + pub log: Vec, + pub events: Vec, } impl HandlerOutput { - pub fn builder() -> HandlerOutputBuilder { - HandlerOutputBuilder::new() - } + pub fn builder() -> HandlerOutputBuilder { + HandlerOutputBuilder::new() + } } #[derive(Clone, Debug, Default)] pub struct HandlerOutputBuilder { - log: Vec, - events: Vec, - marker: PhantomData, + log: Vec, + events: Vec, + marker: PhantomData, } impl HandlerOutputBuilder { - pub fn new() -> Self { - Self { - log: Vec::new(), - events: Vec::new(), - marker: PhantomData, - } - } + pub fn new() -> Self { + Self { log: Vec::new(), events: Vec::new(), marker: PhantomData } + } - pub fn with_log(mut self, log: impl Into>) -> Self { - self.log.append(&mut log.into()); - self - } + pub fn with_log(mut self, log: impl Into>) -> Self { + self.log.append(&mut log.into()); + self + } - pub fn log(&mut self, log: impl Into) { - self.log.push(log.into()); - } + pub fn log(&mut self, log: impl Into) { + self.log.push(log.into()); + } - pub fn with_events(mut self, mut events: Vec) -> Self { - self.events.append(&mut events); - self - } + pub fn with_events(mut self, mut events: Vec) -> Self { + self.events.append(&mut events); + self + } - pub fn emit(&mut self, event: E) { - self.events.push(event); - } + pub fn emit(&mut self, event: E) 
{ + self.events.push(event); + } - pub fn with_result(self, result: T) -> HandlerOutput { - HandlerOutput { - result, - log: self.log, - events: self.events, - } - } + pub fn with_result(self, result: T) -> HandlerOutput { + HandlerOutput { result, log: self.log, events: self.events } + } - pub fn merge>(&mut self, other: HandlerOutputBuilder<(), Event>) { - let HandlerOutputBuilder { - mut log, events, .. - } = other; - self.log.append(&mut log); - self.events - .append(&mut events.into_iter().map(Into::into).collect()); - } + pub fn merge>(&mut self, other: HandlerOutputBuilder<(), Event>) { + let HandlerOutputBuilder { mut log, events, .. } = other; + self.log.append(&mut log); + self.events.append(&mut events.into_iter().map(Into::into).collect()); + } - pub fn merge_output>(&mut self, other: HandlerOutput<(), Event>) { - let HandlerOutput { - mut log, events, .. - } = other; - self.log.append(&mut log); - self.events - .append(&mut events.into_iter().map(Into::into).collect()); - } + pub fn merge_output>(&mut self, other: HandlerOutput<(), Event>) { + let HandlerOutput { mut log, events, .. } = other; + self.log.append(&mut log); + self.events.append(&mut events.into_iter().map(Into::into).collect()); + } } diff --git a/modules/src/lib.rs b/modules/src/lib.rs index 758f180223..0571e1a3c8 100644 --- a/modules/src/lib.rs +++ b/modules/src/lib.rs @@ -1,17 +1,16 @@ // TODO: disable unwraps: // https://github.com/informalsystems/ibc-rs/issues/987 // #![cfg_attr(not(test), deny(clippy::unwrap_used))] - -#![no_std] +#![cfg_attr(not(feature = "std"), no_std)] #![allow(clippy::large_enum_variant)] #![deny( - warnings, + // warnings, trivial_casts, trivial_numeric_casts, unused_import_braces, - unused_qualifications, - rust_2018_idioms + unused_qualifications )] +#![cfg_attr(not(test), deny(rust_2018_idioms))] #![forbid(unsafe_code)] //! This library implements the InterBlockchain Communication (IBC) protocol in Rust. IBC is @@ -34,8 +33,9 @@ //! 
`Applications` consists of various packet encoding and processing semantics which underpin the //! various types of transactions that users can perform on any IBC-compliant chain. //! -//! `Relayer` contains utilities for testing the `ibc` crate against the [Hermes IBC relayer][relayer-repo]. It acts -//! as scaffolding for gluing the `ibc` crate with Hermes for testing purposes. +//! `Relayer` contains utilities for testing the `ibc` crate against the [Hermes IBC +//! relayer][relayer-repo]. It acts as scaffolding for gluing the `ibc` crate with Hermes for +//! testing purposes. //! //! [core]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/core //! [clients]: https://github.com/informalsystems/ibc-rs/tree/master/modules/src/clients @@ -45,37 +45,43 @@ //! [relayer-repo]: https://github.com/informalsystems/ibc-rs/tree/master/relayer extern crate alloc; - +#[allow(unused_imports)] +#[macro_use] +extern crate derive; +#[cfg(any(test, feature = "mocks"))] +#[macro_use] +extern crate serde; #[cfg(feature = "std")] extern crate std; -mod prelude; +pub mod prelude; pub mod applications; pub mod bigint; -pub mod clients; pub mod core; pub mod events; pub mod handler; pub mod keys; +#[macro_use] pub mod macros; pub mod proofs; -pub mod query; -pub mod relayer; +mod serializers; pub mod signer; pub mod timestamp; -pub mod tx_msg; +pub mod tx_msg; // Context mock, the underlying host chain, and client types: for testing all handlers. -mod serializers; +pub mod protobuf { + pub use tendermint_proto::*; +} /// Re-export of ICS 002 Height domain type -pub type Height = crate::core::ics02_client::height::Height; +pub type Height = core::ics02_client::height::Height; -#[cfg(test)] -mod test; +#[cfg(any(test, feature = "mocks"))] +pub mod test; #[cfg(any(test, feature = "mocks"))] pub mod test_utils; #[cfg(any(test, feature = "mocks"))] -pub mod mock; // Context mock, the underlying host chain, and client types: for testing all handlers. 
+pub mod mock; diff --git a/modules/src/macros.rs b/modules/src/macros.rs index c4ad6655d7..fc1d5c999b 100644 --- a/modules/src/macros.rs +++ b/modules/src/macros.rs @@ -32,7 +32,7 @@ #[macro_export] macro_rules! downcast { ( $e1:expr => $p1:path, $( $e:expr => $p:path ),+ $(,)? ) => { - downcast!($e1 => $p1).zip(downcast!($($e => $p),+)) + $crate::downcast!($e1 => $p1).zip($crate::downcast!($($e => $p),+)) }; ($e:expr => $p:path) => { diff --git a/modules/src/mock/client_def.rs b/modules/src/mock/client_def.rs index 427d7f3e29..b33344843e 100644 --- a/modules/src/mock/client_def.rs +++ b/modules/src/mock/client_def.rs @@ -1,186 +1,256 @@ -use ibc_proto::ibc::core::commitment::v1::MerkleProof; - -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_def::ClientDef; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::context::ClientReader; -use crate::core::ics02_client::error::Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::ChannelReader; -use crate::core::ics04_channel::packet::Sequence; -use crate::core::ics23_commitment::commitment::{ - CommitmentPrefix, CommitmentProofBytes, CommitmentRoot, +use crate::core::ics02_client::{ + client_consensus::ConsensusState, + client_def::{ClientDef, ConsensusUpdateResult}, }; -use crate::core::ics23_commitment::merkle::apply_prefix; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics24_host::path::ClientConsensusStatePath; -use crate::core::ics24_host::Path; -use crate::mock::client_state::{MockClientState, MockConsensusState}; -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::Height; + +use crate::mock::header::MockClientMessage; +use crate::{ + 
core::{ + ics02_client::error::Error, + ics03_connection::connection::ConnectionEnd, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + packet::Sequence, + }, + ics23_commitment::{ + commitment::{CommitmentPrefix, CommitmentProofBytes, CommitmentRoot}, + merkle::apply_prefix, + }, + ics24_host::{ + identifier::{ChannelId, ClientId, ConnectionId, PortId}, + path::ClientConsensusStatePath, + Path, + }, + ics26_routing::context::ReaderContext, + }, + mock::{ + client_state::{AnyClientState, AnyConsensusState, MockClientState, MockConsensusState}, + header::AnyClientMessage, + }, + prelude::*, + Height, +}; +use core::fmt::Debug; + +#[derive(Clone, Debug, PartialEq, Eq, ClientDef)] +pub enum AnyClient { + Mock(MockClient), +} #[derive(Clone, Debug, PartialEq, Eq)] pub struct MockClient; +impl Default for MockClient { + fn default() -> Self { + Self + } +} + impl ClientDef for MockClient { - type Header = MockHeader; - type ClientState = MockClientState; - type ConsensusState = MockConsensusState; - - fn check_header_and_update_state( - &self, - _ctx: &dyn ClientReader, - _client_id: ClientId, - client_state: Self::ClientState, - header: Self::Header, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error> { - if client_state.latest_height() >= header.height() { - return Err(Error::low_header_height( - header.height(), - client_state.latest_height(), - )); - } - Ok(( - MockClientState::new(header), - MockConsensusState::new(header), - )) - } - - fn verify_client_consensus_state( - &self, - _client_state: &Self::ClientState, - _height: Height, - prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - client_id: &ClientId, - consensus_height: Height, - _expected_consensus_state: &AnyConsensusState, - ) -> Result<(), Error> { - let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { - client_id: client_id.clone(), - epoch: consensus_height.revision_number, 
- height: consensus_height.revision_height, - }) - .to_string(); - - let _path = apply_prefix(prefix, vec![client_prefixed_path]); - - Ok(()) - } - - fn verify_connection_state( - &self, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _connection_id: &ConnectionId, - _expected_connection_end: &ConnectionEnd, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_channel_state( - &self, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _expected_channel_end: &ChannelEnd, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_client_full_state( - &self, - _client_state: &Self::ClientState, - _height: Height, - _prefix: &CommitmentPrefix, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _client_id: &ClientId, - _expected_client_state: &AnyClientState, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_data( - &self, - _ctx: &dyn ChannelReader, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _commitment: PacketCommitment, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_acknowledgement( - &self, - _ctx: &dyn ChannelReader, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_next_sequence_recv( - &self, - _ctx: &dyn ChannelReader, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: 
&CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_packet_receipt_absence( - &self, - _ctx: &dyn ChannelReader, - _client_state: &Self::ClientState, - _height: Height, - _connection_end: &ConnectionEnd, - _proof: &CommitmentProofBytes, - _root: &CommitmentRoot, - _port_id: &PortId, - _channel_id: &ChannelId, - _sequence: Sequence, - ) -> Result<(), Error> { - Ok(()) - } - - fn verify_upgrade_and_update_state( - &self, - client_state: &Self::ClientState, - consensus_state: &Self::ConsensusState, - _proof_upgrade_client: MerkleProof, - _proof_upgrade_consensus_state: MerkleProof, - ) -> Result<(Self::ClientState, Self::ConsensusState), Error> { - Ok((*client_state, consensus_state.clone())) - } + type ClientMessage = MockClientMessage; + type ClientState = MockClientState; + type ConsensusState = MockConsensusState; + + fn update_state( + &self, + _ctx: &Ctx, + _client_id: ClientId, + client_state: Self::ClientState, + client_message: Self::ClientMessage, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + if client_state.latest_height() >= client_message.height() { + return Err(Error::low_header_height( + client_message.height(), + client_state.latest_height(), + )); + } + + let header = client_message.header(); + Ok(( + MockClientState::new(client_message), + ConsensusUpdateResult::Single( + Ctx::AnyConsensusState::wrap(&MockConsensusState::new(header)).unwrap(), + ), + )) + } + + fn verify_client_consensus_state( + &self, + _ctx: &Ctx, + _client_state: &Self::ClientState, + _height: Height, + prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + client_id: &ClientId, + consensus_height: Height, + _expected_consensus_state: &Ctx::AnyConsensusState, + ) -> Result<(), Error> { + let client_prefixed_path = Path::ClientConsensusState(ClientConsensusStatePath { + client_id: client_id.clone(), + epoch: 
consensus_height.revision_number, + height: consensus_height.revision_height, + }) + .to_string(); + + let _path = apply_prefix(prefix, vec![client_prefixed_path]); + + Ok(()) + } + + fn verify_connection_state( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _connection_id: &ConnectionId, + _expected_connection_end: &ConnectionEnd, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_channel_state( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _expected_channel_end: &ChannelEnd, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_client_full_state( + &self, + _ctx: &Ctx, + _client_state: &Self::ClientState, + _height: Height, + _prefix: &CommitmentPrefix, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _client_id: &ClientId, + _expected_client_state: &Ctx::AnyClientState, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_data( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _commitment: PacketCommitment, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_acknowledgement( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_next_sequence_recv( + &self, + _ctx: &Ctx, + 
_client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_packet_receipt_absence( + &self, + _ctx: &Ctx, + _client_id: &ClientId, + _client_state: &Self::ClientState, + _height: Height, + _connection_end: &ConnectionEnd, + _proof: &CommitmentProofBytes, + _root: &CommitmentRoot, + _port_id: &PortId, + _channel_id: &ChannelId, + _sequence: Sequence, + ) -> Result<(), Error> { + Ok(()) + } + + fn verify_upgrade_and_update_state( + &self, + _ctx: &Ctx, + _client_id: ClientId, + _old_client_state: &Self::ClientState, + upgrade_client_state: &Self::ClientState, + upgrade_consensus_state: &Self::ConsensusState, + _proof_upgrade_client: Vec, + _proof_upgrade_consensus_state: Vec, + ) -> Result<(Self::ClientState, ConsensusUpdateResult), Error> { + Ok(( + *upgrade_client_state, + ConsensusUpdateResult::Single(Ctx::AnyConsensusState::wrap(upgrade_consensus_state).unwrap()), + )) + } + + fn verify_client_message( + &self, + _ctx: &Ctx, + _client_id: ClientId, + _client_state: Self::ClientState, + _client_msg: Self::ClientMessage, + ) -> Result<(), Error> { + Ok(()) + } + + fn update_state_on_misbehaviour( + &self, + client_state: Self::ClientState, + _client_msg: Self::ClientMessage, + ) -> Result { + Ok(client_state) + } + + fn check_for_misbehaviour( + &self, + _ctx: &Ctx, + _client_id: ClientId, + _client_state: Self::ClientState, + _client_msg: Self::ClientMessage, + ) -> Result { + Ok(false) + } } diff --git a/modules/src/mock/client_state.rs b/modules/src/mock/client_state.rs index aa4d7863ca..677e197f63 100644 --- a/modules/src/mock/client_state.rs +++ b/modules/src/mock/client_state.rs @@ -2,192 +2,317 @@ use crate::prelude::*; use alloc::collections::btree_map::BTreeMap as HashMap; -use core::convert::Infallible; -use 
core::time::Duration; +use core::{convert::Infallible, fmt::Debug, time::Duration}; +use ibc_proto::google::protobuf::Any; +use ibc_proto::ibc::core::client::v1::ConsensusStateWithHeight; use serde::{Deserialize, Serialize}; use tendermint_proto::Protobuf; -use ibc_proto::ibc::mock::ClientState as RawMockClientState; -use ibc_proto::ibc::mock::ConsensusState as RawMockConsensusState; - -use crate::core::ics02_client::client_consensus::{AnyConsensusState, ConsensusState}; -use crate::core::ics02_client::client_state::{AnyClientState, ClientState}; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentRoot; -use crate::core::ics24_host::identifier::ChainId; -use crate::mock::header::MockHeader; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::core::ics02_client::context::ClientTypes; +use crate::core::ics02_client::error::Error as Ics02Error; +use crate::mock::header::MockClientMessage; +use crate::{ + core::{ + ics02_client::{ + client_consensus::ConsensusState, + client_state::{ClientState, ClientType}, + error::Error, + }, + ics23_commitment::commitment::CommitmentRoot, + ics24_host::identifier::ChainId, + }, + downcast, + mock::{ + client_def::{AnyClient, MockClient}, + context::HostBlockType, + header::MockHeader, + }, + timestamp::Timestamp, + Height, +}; +use ibc_proto::ibc::mock::{ + ClientState as RawMockClientState, ConsensusState as RawMockConsensusState, +}; + +pub const MOCK_CLIENT_STATE_TYPE_URL: &str = "/ibc.mock.ClientState"; /// A mock of an IBC client record as it is stored in a mock context. /// For testing ICS02 handlers mostly, cf. `MockClientContext`. #[derive(Clone, Debug)] -pub struct MockClientRecord { - /// The type of this client. - pub client_type: ClientType, +pub struct MockClientRecord { + /// The type of this client. 
+ pub client_type: ClientType, + + /// The client state (representing only the latest height at the moment). + pub client_state: Option, - /// The client state (representing only the latest height at the moment). - pub client_state: Option, + /// Mapping of heights to consensus states for this client. + pub consensus_states: HashMap, +} - /// Mapping of heights to consensus states for this client. - pub consensus_states: HashMap, +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum AnyUpgradeOptions { + Mock(()), +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, ClientState, Protobuf)] +#[serde(tag = "type")] +pub enum AnyClientState { + #[ibc(proto_url = "MOCK_CLIENT_STATE_TYPE_URL")] + Mock(MockClientState), } /// A mock of a client state. For an example of a real structure that this mocks, you can see /// `ClientState` of ics07_tendermint/client_state.rs. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Copy)] pub struct MockClientState { - pub header: MockHeader, - pub frozen_height: Option, + pub header: MockHeader, + pub frozen_height: Option, } impl Protobuf for MockClientState {} impl MockClientState { - pub fn new(header: MockHeader) -> Self { - Self { - header, - frozen_height: None, - } - } - - pub fn latest_height(&self) -> Height { - self.header.height() - } - - pub fn refresh_time(&self) -> Option { - None - } + pub fn new(client_message: MockClientMessage) -> Self { + Self { header: client_message.header(), frozen_height: None } + } - pub fn expired(&self, _elapsed: Duration) -> bool { - false - } + pub fn refresh_time(&self) -> Option { + None + } } impl From for AnyClientState { - fn from(mcs: MockClientState) -> Self { - Self::Mock(mcs) - } + fn from(mcs: MockClientState) -> Self { + Self::Mock(mcs) + } } impl TryFrom for MockClientState { - type Error = Error; + type Error = Error; - fn try_from(raw: 
RawMockClientState) -> Result { - Ok(Self::new(raw.header.unwrap().try_into()?)) - } + fn try_from(raw: RawMockClientState) -> Result { + Ok(Self::new(MockHeader::try_from(raw.header.unwrap())?.into())) + } } impl From for RawMockClientState { - fn from(value: MockClientState) -> Self { - RawMockClientState { - header: Some(ibc_proto::ibc::mock::Header { - height: Some(value.header.height().into()), - timestamp: value.header.timestamp.nanoseconds(), - }), - } - } + fn from(value: MockClientState) -> Self { + RawMockClientState { + header: Some(ibc_proto::ibc::mock::Header { + height: Some(value.header.height().into()), + timestamp: value.header.timestamp.nanoseconds(), + }), + } + } } impl ClientState for MockClientState { - type UpgradeOptions = (); + type UpgradeOptions = (); + type ClientDef = MockClient; - fn chain_id(&self) -> ChainId { - todo!() - } + fn chain_id(&self) -> ChainId { + self.chain_id() + } - fn client_type(&self) -> ClientType { - ClientType::Mock - } + fn client_def(&self) -> Self::ClientDef { + MockClient::default() + } - fn latest_height(&self) -> Height { - self.header.height() - } + fn client_type(&self) -> ClientType { + Self::client_type() + } - fn frozen_height(&self) -> Option { - self.frozen_height - } + fn latest_height(&self) -> Height { + self.latest_height() + } - fn upgrade(self, _upgrade_height: Height, _upgrade_options: (), _chain_id: ChainId) -> Self { - todo!() - } + fn frozen_height(&self) -> Option { + self.frozen_height() + } - fn wrap_any(self) -> AnyClientState { - AnyClientState::Mock(self) - } + fn upgrade(self, _upgrade_height: Height, _upgrade_options: (), _chain_id: ChainId) -> Self { + self.upgrade(_upgrade_height, _upgrade_options, _chain_id) + } + + fn expired(&self, elapsed: Duration) -> bool { + self.expired(elapsed) + } + + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } +} + +impl MockClientState { + pub fn chain_id(&self) -> ChainId { + ChainId::default() + } + + pub fn client_type() -> ClientType { 
+ "9999-mock".to_string() + } + + pub fn latest_height(&self) -> Height { + self.header.height() + } + + pub fn frozen_height(&self) -> Option { + self.frozen_height + } + + pub fn upgrade( + self, + _upgrade_height: Height, + _upgrade_options: (), + _chain_id: ChainId, + ) -> Self { + todo!() + } + + pub fn expired(&self, _elapsed: Duration) -> bool { + false + } } impl From for MockClientState { - fn from(cs: MockConsensusState) -> Self { - Self::new(cs.header) - } + fn from(cs: MockConsensusState) -> Self { + Self::new(cs.header.into()) + } +} + +pub const MOCK_CONSENSUS_STATE_TYPE_URL: &str = "/ibc.mock.ConsensusState"; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize, ConsensusState, Protobuf)] +#[serde(tag = "type")] +pub enum AnyConsensusState { + #[ibc(proto_url = "MOCK_CONSENSUS_STATE_TYPE_URL")] + Mock(MockConsensusState), +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize)] +pub struct AnyConsensusStateWithHeight { + pub height: Height, + pub consensus_state: C::AnyConsensusState, +} + +impl Protobuf for AnyConsensusStateWithHeight +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ +} + +impl TryFrom for AnyConsensusStateWithHeight +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + type Error = Error; + + fn try_from(value: ConsensusStateWithHeight) -> Result { + let state = value + .consensus_state + .map(C::AnyConsensusState::try_from) + .transpose()? 
+ .ok_or_else(Error::empty_consensus_state_response)?; + + Ok(AnyConsensusStateWithHeight { + height: value.height.ok_or_else(Error::missing_height)?.into(), + consensus_state: state, + }) + } +} + +impl From> for ConsensusStateWithHeight +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + fn from(value: AnyConsensusStateWithHeight) -> Self { + ConsensusStateWithHeight { + height: Some(value.height.into()), + consensus_state: Some(value.consensus_state.into()), + } + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct MockConsensusState { - pub header: MockHeader, - pub root: CommitmentRoot, + pub header: MockHeader, + pub root: CommitmentRoot, } impl MockConsensusState { - pub fn new(header: MockHeader) -> Self { - MockConsensusState { - header, - root: CommitmentRoot::from(vec![0]), - } - } + pub fn new(header: MockHeader) -> Self { + MockConsensusState { header, root: CommitmentRoot::from(vec![0]) } + } - pub fn timestamp(&self) -> Timestamp { - self.header.timestamp - } + pub fn timestamp(&self) -> Timestamp { + self.header.timestamp + } } impl Protobuf for MockConsensusState {} impl TryFrom for MockConsensusState { - type Error = Error; + type Error = Error; - fn try_from(raw: RawMockConsensusState) -> Result { - let raw_header = raw.header.ok_or_else(Error::missing_raw_consensus_state)?; + fn try_from(raw: RawMockConsensusState) -> Result { + let raw_header = raw.header.ok_or_else(Error::missing_raw_consensus_state)?; - Ok(Self { - header: MockHeader::try_from(raw_header)?, - root: CommitmentRoot::from(vec![0]), - }) - } + Ok(Self { header: MockHeader::try_from(raw_header)?, root: CommitmentRoot::from(vec![0]) }) + } } impl From for RawMockConsensusState { - fn from(value: MockConsensusState) -> Self { - RawMockConsensusState { - header: Some(ibc_proto::ibc::mock::Header { - height: Some(value.header.height().into()), - timestamp: 
value.header.timestamp.nanoseconds(), - }), - } - } + fn from(value: MockConsensusState) -> Self { + RawMockConsensusState { + header: Some(ibc_proto::ibc::mock::Header { + height: Some(value.header.height().into()), + timestamp: value.header.timestamp.nanoseconds(), + }), + } + } } impl From for AnyConsensusState { - fn from(mcs: MockConsensusState) -> Self { - Self::Mock(mcs) - } + fn from(mcs: MockConsensusState) -> Self { + Self::Mock(mcs) + } +} + +impl TryFrom for MockConsensusState { + type Error = Error; + + fn try_from(value: AnyConsensusState) -> Result { + downcast!( + value => AnyConsensusState::Mock + ) + .ok_or_else(|| Error::client_args_type_mismatch(MockClientState::client_type().to_owned())) + } } impl ConsensusState for MockConsensusState { - type Error = Infallible; + type Error = Infallible; - fn client_type(&self) -> ClientType { - ClientType::Mock - } + fn root(&self) -> &CommitmentRoot { + &self.root + } - fn root(&self) -> &CommitmentRoot { - &self.root - } + fn timestamp(&self) -> Timestamp { + self.timestamp() + } - fn wrap_any(self) -> AnyConsensusState { - AnyConsensusState::Mock(self) - } + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } } diff --git a/modules/src/mock/context.rs b/modules/src/mock/context.rs index 4c59475803..12de1320b1 100644 --- a/modules/src/mock/context.rs +++ b/modules/src/mock/context.rs @@ -1,1578 +1,1405 @@ //! Implementation of a global context mock. Used in testing handlers of all IBC modules. 
- use crate::prelude::*; -use alloc::collections::btree_map::BTreeMap; -use alloc::sync::Arc; -use core::borrow::Borrow; -use core::cmp::min; -use core::fmt::Debug; -use core::ops::{Add, Sub}; -use core::time::Duration; -use std::sync::Mutex; +use alloc::{collections::btree_map::BTreeMap, sync::Arc}; +use core::{ + borrow::Borrow, + cmp::min, + fmt::{Debug, Formatter}, + ops::{Add, Sub}, + time::Duration, +}; +use std::{marker::PhantomData, sync::Mutex}; use ibc_proto::google::protobuf::Any; use sha2::Digest; use tracing::debug; -use crate::clients::ics07_tendermint::client_state::test_util::get_dummy_tendermint_client_state; -use crate::core::ics02_client::client_consensus::{AnyConsensusState, AnyConsensusStateWithHeight}; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::context::{ClientKeeper, ClientReader}; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::context::{ConnectionKeeper, ConnectionReader}; -use crate::core::ics03_connection::error::Error as Ics03Error; -use crate::core::ics04_channel::channel::ChannelEnd; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error as Ics04Error; -use crate::core::ics04_channel::packet::{Receipt, Sequence}; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics05_port::error::Error as Ics05Error; -use crate::core::ics05_port::error::Error; -use crate::core::ics23_commitment::commitment::CommitmentPrefix; -use crate::core::ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Ics26Context, Module, ModuleId, Router, 
RouterBuilder}; -use crate::core::ics26_routing::handler::{deliver, dispatch, MsgReceipt}; -use crate::core::ics26_routing::msgs::Ics26Envelope; -use crate::events::IbcEvent; -use crate::mock::client_state::{MockClientRecord, MockClientState, MockConsensusState}; -use crate::mock::header::MockHeader; -use crate::mock::host::{HostBlock, HostType}; -use crate::relayer::ics18_relayer::context::Ics18Context; -use crate::relayer::ics18_relayer::error::Error as Ics18Error; -use crate::signer::Signer; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::core::ics02_client::context::ClientTypes; +#[cfg(test)] +use crate::core::ics02_client::events::Attributes; +use crate::{ + core::{ + ics02_client::{ + client_state::{ClientState, ClientType}, + context::{ClientKeeper, ClientReader}, + error::Error as Ics02Error, + }, + ics03_connection::{ + connection::ConnectionEnd, + context::{ConnectionKeeper, ConnectionReader}, + error::Error as Ics03Error, + }, + ics04_channel::{ + channel::ChannelEnd, + commitment::{AcknowledgementCommitment, PacketCommitment}, + context::{ChannelKeeper, ChannelReader}, + error::Error as Ics04Error, + packet::{Receipt, Sequence}, + }, + ics05_port::{ + context::PortReader, + error::{Error as Ics05Error, Error}, + }, + ics23_commitment::commitment::CommitmentPrefix, + ics24_host::identifier::{ChainId, ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::{ + context::{Ics26Context, Module, ModuleId, ReaderContext, Router, RouterBuilder}, + error::Error as Ics26Error, + handler::dispatch, + msgs::Ics26Envelope, + }, + }, + mock::{ + client_def::AnyClient, + client_state::{ + AnyClientState, AnyConsensusState, AnyConsensusStateWithHeight, MockClientRecord, + MockClientState, MockConsensusState, + }, + header::{AnyClientMessage, MockHeader}, + host::{HostBlock, MockHostBlock}, + }, + timestamp::Timestamp, + Height, +}; pub const DEFAULT_BLOCK_TIME_SECS: u64 = 3; /// A context implementing the dependencies necessary for testing any 
IBC module. #[derive(Debug)] -pub struct MockContext { - /// The type of host chain underlying this mock context. - host_chain_type: HostType, +pub struct MockContext { + /// The type of host chain underlying this mock context. + pub host_chain_type: ::HostType, + + /// Host chain identifier. + pub host_chain_id: ChainId, - /// Host chain identifier. - host_chain_id: ChainId, + /// Maximum size for the history of the host chain. Any block older than this is pruned. + pub max_history_size: usize, - /// Maximum size for the history of the host chain. Any block older than this is pruned. - max_history_size: usize, + /// The chain of blocks underlying this context. A vector of size up to `max_history_size` + /// blocks, ascending order by their height (latest block is on the last position). + pub history: Vec, - /// The chain of blocks underlying this context. A vector of size up to `max_history_size` - /// blocks, ascending order by their height (latest block is on the last position). - history: Vec, + /// Average time duration between blocks + pub block_time: Duration, - /// Average time duration between blocks - block_time: Duration, + /// An object that stores all IBC related data. + pub ibc_store: Arc>>, - /// An object that stores all IBC related data. - pub ibc_store: Arc>, + /// ICS26 router impl + pub router: MockRouter, - /// ICS26 router impl - router: MockRouter, + pub _phantom: PhantomData, } -/// Returns a MockContext with bare minimum initialization: no clients, no connections and no channels are -/// present, and the chain has Height(5). This should be used sparingly, mostly for testing the -/// creation of new domain objects. 
-impl Default for MockContext { - fn default() -> Self { - Self::new( - ChainId::new("mockgaia".to_string(), 0), - HostType::Mock, - 5, - Height::new(0, 5), - ) - } +impl PartialEq for MockContext { + fn eq(&self, _other: &Self) -> bool { + unimplemented!() + } +} + +impl Eq for MockContext {} + +/// Returns a MockContext with bare minimum initialization: no clients, no connections and no +/// channels are present, and the chain has Height(5). This should be used sparingly, mostly for +/// testing the creation of new domain objects. +impl Default for MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + fn default() -> Self { + Self::new( + ChainId::new("mockgaia".to_string(), 0), + ::HostType::default(), + 5, + Height::new(0, 5), + ) + } } /// A manual clone impl is provided because the tests are oblivious to the fact that the `ibc_store` /// is a shared ptr. -impl Clone for MockContext { - fn clone(&self) -> Self { - let ibc_store = { - let ibc_store = self.ibc_store.lock().unwrap().clone(); - Arc::new(Mutex::new(ibc_store)) - }; - Self { - host_chain_type: self.host_chain_type, - host_chain_id: self.host_chain_id.clone(), - max_history_size: self.max_history_size, - history: self.history.clone(), - block_time: self.block_time, - ibc_store, - router: self.router.clone(), - } - } +impl Clone for MockContext { + fn clone(&self) -> Self { + let ibc_store = { + let ibc_store = self.ibc_store.lock().unwrap().clone(); + Arc::new(Mutex::new(ibc_store)) + }; + Self { + host_chain_type: self.host_chain_type, + host_chain_id: self.host_chain_id.clone(), + max_history_size: self.max_history_size, + history: self.history.clone(), + block_time: self.block_time, + ibc_store, + router: self.router.clone(), + _phantom: Default::default(), + } + } } /// Implementation of internal interface for use in testing. 
The methods in this interface should /// _not_ be accessible to any Ics handler. -impl MockContext { - /// Creates a mock context. Parameter `max_history_size` determines how many blocks will - /// the chain maintain in its history, which also determines the pruning window. Parameter - /// `latest_height` determines the current height of the chain. This context - /// has support to emulate two type of underlying chains: Mock or SyntheticTendermint. - pub fn new( - host_id: ChainId, - host_type: HostType, - max_history_size: usize, - latest_height: Height, - ) -> Self { - assert_ne!( - max_history_size, 0, - "The chain must have a non-zero max_history_size" - ); - - assert_ne!( - latest_height.revision_height, 0, - "The chain must have a non-zero revision_height" - ); - - // Compute the number of blocks to store. - let n = min(max_history_size as u64, latest_height.revision_height); - - assert_eq!( - host_id.version(), - latest_height.revision_number, - "The version in the chain identifier must match the version in the latest height" - ); - - let block_time = Duration::from_secs(DEFAULT_BLOCK_TIME_SECS); - let next_block_timestamp = Timestamp::now().add(block_time).unwrap(); - MockContext { - host_chain_type: host_type, - host_chain_id: host_id.clone(), - max_history_size, - history: (0..n) - .rev() - .map(|i| { - // generate blocks with timestamps -> N, N - BT, N - 2BT, ... - // where N = now(), BT = block_time - HostBlock::generate_block( - host_id.clone(), - host_type, - latest_height.sub(i).unwrap().revision_height, - next_block_timestamp - .sub(Duration::from_secs(DEFAULT_BLOCK_TIME_SECS * (i + 1))) - .unwrap(), - ) - }) - .collect(), - block_time, - ibc_store: Arc::new(Mutex::new(MockIbcStore::default())), - router: Default::default(), - } - } - - /// Associates a client record to this context. 
- /// Given a client id and a height, registers a new client in the context and also associates - /// to this client a mock client state and a mock consensus state for height `height`. The type - /// of this client is implicitly assumed to be Mock. - pub fn with_client(self, client_id: &ClientId, height: Height) -> Self { - self.with_client_parametrized(client_id, height, Some(ClientType::Mock), Some(height)) - } - - /// Similar to `with_client`, this function associates a client record to this context, but - /// additionally permits to parametrize two details of the client. If `client_type` is None, - /// then the client will have type Mock, otherwise the specified type. If - /// `consensus_state_height` is None, then the client will be initialized with a consensus - /// state matching the same height as the client state (`client_state_height`). - pub fn with_client_parametrized( - self, - client_id: &ClientId, - client_state_height: Height, - client_type: Option, - consensus_state_height: Option, - ) -> Self { - let cs_height = consensus_state_height.unwrap_or(client_state_height); - - let client_type = client_type.unwrap_or(ClientType::Mock); - let (client_state, consensus_state) = match client_type { - // If it's a mock client, create the corresponding mock states. - ClientType::Mock => ( - Some(MockClientState::new(MockHeader::new(client_state_height)).into()), - MockConsensusState::new(MockHeader::new(cs_height)).into(), - ), - // If it's a Tendermint client, we need TM states. - ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - cs_height.revision_height, - Timestamp::now(), - ); - - let consensus_state = AnyConsensusState::from(light_block.clone()); - let client_state = - get_dummy_tendermint_client_state(light_block.signed_header.header); - - // Return the tuple. 
- (Some(client_state), consensus_state) - } - }; - let consensus_states = vec![(cs_height, consensus_state)].into_iter().collect(); - - debug!("consensus states: {:?}", consensus_states); - - let client_record = MockClientRecord { - client_type, - client_state, - consensus_states, - }; - self.ibc_store - .lock() - .unwrap() - .clients - .insert(client_id.clone(), client_record); - self - } - - pub fn with_client_parametrized_history( - self, - client_id: &ClientId, - client_state_height: Height, - client_type: Option, - consensus_state_height: Option, - ) -> Self { - let cs_height = consensus_state_height.unwrap_or(client_state_height); - let prev_cs_height = cs_height.clone().sub(1).unwrap_or(client_state_height); - - let client_type = client_type.unwrap_or(ClientType::Mock); - let now = Timestamp::now(); - - let (client_state, consensus_state) = match client_type { - // If it's a mock client, create the corresponding mock states. - ClientType::Mock => ( - Some(MockClientState::new(MockHeader::new(client_state_height)).into()), - MockConsensusState::new(MockHeader::new(cs_height)).into(), - ), - // If it's a Tendermint client, we need TM states. - ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - cs_height.revision_height, - now, - ); - - let consensus_state = AnyConsensusState::from(light_block.clone()); - let client_state = - get_dummy_tendermint_client_state(light_block.signed_header.header); - - // Return the tuple. - (Some(client_state), consensus_state) - } - }; - - let prev_consensus_state = match client_type { - // If it's a mock client, create the corresponding mock states. - ClientType::Mock => MockConsensusState::new(MockHeader::new(prev_cs_height)).into(), - // If it's a Tendermint client, we need TM states. 
- ClientType::Tendermint => { - let light_block = HostBlock::generate_tm_block( - self.host_chain_id.clone(), - prev_cs_height.revision_height, - now.sub(self.block_time).unwrap(), - ); - AnyConsensusState::from(light_block) - } - }; - - let consensus_states = vec![ - (prev_cs_height, prev_consensus_state), - (cs_height, consensus_state), - ] - .into_iter() - .collect(); - - debug!("consensus states: {:?}", consensus_states); - - let client_record = MockClientRecord { - client_type, - client_state, - consensus_states, - }; - - self.ibc_store - .lock() - .unwrap() - .clients - .insert(client_id.clone(), client_record); - self - } - - /// Associates a connection to this context. - pub fn with_connection( - self, - connection_id: ConnectionId, - connection_end: ConnectionEnd, - ) -> Self { - self.ibc_store - .lock() - .unwrap() - .connections - .insert(connection_id, connection_end); - self - } - - /// Associates a channel (in an arbitrary state) to this context. - pub fn with_channel( - self, - port_id: PortId, - chan_id: ChannelId, - channel_end: ChannelEnd, - ) -> Self { - let mut channels = self.ibc_store.lock().unwrap().channels.clone(); - channels.insert((port_id, chan_id), channel_end); - self.ibc_store.lock().unwrap().channels = channels; - self - } - - pub fn with_send_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_send = self.ibc_store.lock().unwrap().next_sequence_send.clone(); - next_sequence_send.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_send = next_sequence_send; - self - } - - pub fn with_recv_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_recv = self.ibc_store.lock().unwrap().next_sequence_recv.clone(); - next_sequence_recv.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_recv = next_sequence_recv; - self - } - - pub fn 
with_ack_sequence( - self, - port_id: PortId, - chan_id: ChannelId, - seq_number: Sequence, - ) -> Self { - let mut next_sequence_ack = self.ibc_store.lock().unwrap().next_sequence_send.clone(); - next_sequence_ack.insert((port_id, chan_id), seq_number); - self.ibc_store.lock().unwrap().next_sequence_ack = next_sequence_ack; - self - } - - pub fn with_height(self, target_height: Height) -> Self { - let latest_height = self.latest_height(); - if target_height.revision_number > latest_height.revision_number { - unimplemented!() - } else if target_height.revision_number < latest_height.revision_number { - panic!("Cannot rewind history of the chain to a smaller revision number!") - } else if target_height.revision_height < latest_height.revision_height { - panic!("Cannot rewind history of the chain to a smaller revision height!") - } else if target_height.revision_height > latest_height.revision_height { - // Repeatedly advance the host chain height till we hit the desired height - let mut ctx = MockContext { ..self }; - while ctx.latest_height().revision_height < target_height.revision_height { - ctx.advance_host_chain_height() - } - ctx - } else { - // Both the revision number and height match - self - } - } - - pub fn with_packet_commitment( - self, - port_id: PortId, - chan_id: ChannelId, - seq: Sequence, - data: PacketCommitment, - ) -> Self { - let mut packet_commitment = self.ibc_store.lock().unwrap().packet_commitment.clone(); - packet_commitment.insert((port_id, chan_id, seq), data); - self.ibc_store.lock().unwrap().packet_commitment = packet_commitment; - self - } - - pub fn with_router(self, router: MockRouter) -> Self { - Self { router, ..self } - } - - /// Accessor for a block of the local (host) chain from this context. - /// Returns `None` if the block at the requested height does not exist. 
- pub fn host_block(&self, target_height: Height) -> Option<&HostBlock> { - let target = target_height.revision_height as usize; - let latest = self.latest_height().revision_height as usize; - - // Check that the block is not too advanced, nor has it been pruned. - if (target > latest) || (target <= latest - self.history.len()) { - None // Block for requested height does not exist in history. - } else { - Some(&self.history[self.history.len() + target - latest - 1]) - } - } - - /// Triggers the advancing of the host chain, by extending the history of blocks (or headers). - pub fn advance_host_chain_height(&mut self) { - let latest_block = self.history.last().expect("history cannot be empty"); - let new_block = HostBlock::generate_block( - self.host_chain_id.clone(), - self.host_chain_type, - latest_block.height().increment().revision_height, - latest_block.timestamp().add(self.block_time).unwrap(), - ); - - // Append the new header at the tip of the history. - if self.history.len() >= self.max_history_size { - // History is full, we rotate and replace the tip with the new header. - self.history.rotate_left(1); - self.history[self.max_history_size - 1] = new_block; - } else { - // History is not full yet. - self.history.push(new_block); - } - } - - /// A datagram passes from the relayer to the IBC module (on host chain). - /// Alternative method to `Ics18Context::send` that does not exercise any serialization. - /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. - pub fn deliver(&mut self, msg: Ics26Envelope) -> Result<(), Ics18Error> { - dispatch(self, msg).map_err(Ics18Error::transaction_failed)?; - // Create a new block. - self.advance_host_chain_height(); - Ok(()) - } - - /// Validates this context. Should be called after the context is mutated by a test. - pub fn validate(&self) -> Result<(), String> { - // Check that the number of entries is not higher than window size. 
- if self.history.len() > self.max_history_size { - return Err("too many entries".to_string()); - } - - // Check the content of the history. - if !self.history.is_empty() { - // Get the highest block. - let lh = &self.history[self.history.len() - 1]; - // Check latest is properly updated with highest header height. - if lh.height() != self.latest_height() { - return Err("latest height is not updated".to_string()); - } - } - - // Check that headers in the history are in sequential order. - for i in 1..self.history.len() { - let ph = &self.history[i - 1]; - let h = &self.history[i]; - if ph.height().increment() != h.height() { - return Err("headers in history not sequential".to_string()); - } - } - Ok(()) - } - - pub fn add_port(&mut self, port_id: PortId) { - let module_id = ModuleId::new(format!("module{}", port_id).into()).unwrap(); - self.ibc_store - .lock() - .unwrap() - .port_to_module - .insert(port_id, module_id); - } - - pub fn scope_port_to_module(&mut self, port_id: PortId, module_id: ModuleId) { - self.ibc_store - .lock() - .unwrap() - .port_to_module - .insert(port_id, module_id); - } - - pub fn consensus_states(&self, client_id: &ClientId) -> Vec { - self.ibc_store.lock().unwrap().clients[client_id] - .consensus_states - .iter() - .map(|(k, v)| AnyConsensusStateWithHeight { - height: *k, - consensus_state: v.clone(), - }) - .collect() - } - - pub fn latest_client_states(&self, client_id: &ClientId) -> AnyClientState { - self.ibc_store.lock().unwrap().clients[client_id] - .client_state - .as_ref() - .unwrap() - .clone() - } - - pub fn latest_consensus_states( - &self, - client_id: &ClientId, - height: &Height, - ) -> AnyConsensusState { - self.ibc_store.lock().unwrap().clients[client_id] - .consensus_states - .get(height) - .unwrap() - .clone() - } - - #[inline] - fn latest_height(&self) -> Height { - self.history - .last() - .expect("history cannot be empty") - .height() - } - - pub fn ibc_store_share(&self) -> Arc> { - self.ibc_store.clone() - } +impl 
MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + /// Creates a mock context. Parameter `max_history_size` determines how many blocks will + /// the chain maintain in its history, which also determines the pruning window. Parameter + /// `latest_height` determines the current height of the chain. This context + /// has support to emulate two type of underlying chains: Mock or SyntheticTendermint. + pub fn new( + host_id: ChainId, + host_type: ::HostType, + max_history_size: usize, + latest_height: Height, + ) -> Self { + assert_ne!(max_history_size, 0, "The chain must have a non-zero max_history_size"); + + assert_ne!( + latest_height.revision_height, 0, + "The chain must have a non-zero revision_height" + ); + + // Compute the number of blocks to store. + let n = min(max_history_size as u64, latest_height.revision_height); + + assert_eq!( + host_id.version(), + latest_height.revision_number, + "The version in the chain identifier must match the version in the latest height" + ); + + let block_time = Duration::from_secs(DEFAULT_BLOCK_TIME_SECS); + let next_block_timestamp = Timestamp::now().add(block_time).unwrap(); + MockContext { + host_chain_type: host_type, + host_chain_id: host_id.clone(), + max_history_size, + history: (0..n) + .rev() + .map(|i| { + // generate blocks with timestamps -> N, N - BT, N - 2BT, ... + // where N = now(), BT = block_time + ::generate_block( + host_id.clone(), + host_type, + latest_height.sub(i).unwrap().revision_height, + next_block_timestamp + .sub(Duration::from_secs(DEFAULT_BLOCK_TIME_SECS * (i + 1))) + .unwrap(), + ) + }) + .collect(), + block_time, + ibc_store: Arc::new(Mutex::new(MockIbcStore::::default())), + router: Default::default(), + _phantom: Default::default(), + } + } + + /// Associates a connection to this context. 
+ pub fn with_connection( + self, + connection_id: ConnectionId, + connection_end: ConnectionEnd, + ) -> Self { + self.ibc_store.lock().unwrap().connections.insert(connection_id, connection_end); + self + } + + /// Associates a channel (in an arbitrary state) to this context. + pub fn with_channel( + self, + port_id: PortId, + chan_id: ChannelId, + channel_end: ChannelEnd, + ) -> Self { + let mut channels = self.ibc_store.lock().unwrap().channels.clone(); + channels.insert((port_id, chan_id), channel_end); + self.ibc_store.lock().unwrap().channels = channels; + self + } + + pub fn with_send_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_send = self.ibc_store.lock().unwrap().next_sequence_send.clone(); + next_sequence_send.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_send = next_sequence_send; + self + } + + pub fn with_recv_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_recv = self.ibc_store.lock().unwrap().next_sequence_recv.clone(); + next_sequence_recv.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_recv = next_sequence_recv; + self + } + + pub fn with_ack_sequence( + self, + port_id: PortId, + chan_id: ChannelId, + seq_number: Sequence, + ) -> Self { + let mut next_sequence_ack = self.ibc_store.lock().unwrap().next_sequence_send.clone(); + next_sequence_ack.insert((port_id, chan_id), seq_number); + self.ibc_store.lock().unwrap().next_sequence_ack = next_sequence_ack; + self + } + + pub fn with_height(self, target_height: Height) -> Self { + let latest_height = self.latest_height(); + if target_height.revision_number > latest_height.revision_number { + unimplemented!() + } else if target_height.revision_number < latest_height.revision_number { + panic!("Cannot rewind history of the chain to a smaller revision number!") + } else if 
target_height.revision_height < latest_height.revision_height { + panic!("Cannot rewind history of the chain to a smaller revision height!") + } else if target_height.revision_height > latest_height.revision_height { + // Repeatedly advance the host chain height till we hit the desired height + let mut ctx = MockContext { ..self }; + while ctx.latest_height().revision_height < target_height.revision_height { + ctx.advance_host_chain_height() + } + ctx + } else { + // Both the revision number and height match + self + } + } + + pub fn with_packet_commitment( + self, + port_id: PortId, + chan_id: ChannelId, + seq: Sequence, + data: PacketCommitment, + ) -> Self { + let mut packet_commitment = self.ibc_store.lock().unwrap().packet_commitment.clone(); + packet_commitment.insert((port_id, chan_id, seq), data); + self.ibc_store.lock().unwrap().packet_commitment = packet_commitment; + self + } + + pub fn with_router(self, router: MockRouter) -> Self { + Self { router, ..self } + } + + /// Accessor for a block of the local (host) chain from this context. + /// Returns `None` if the block at the requested height does not exist. + pub fn host_block(&self, target_height: Height) -> Option<&C::HostBlock> { + let target = target_height.revision_height as usize; + let latest = self.latest_height().revision_height as usize; + + // Check that the block is not too advanced, nor has it been pruned. + if (target > latest) || (target <= latest - self.history.len()) { + None // Block for requested height does not exist in history. + } else { + Some(&self.history[self.history.len() + target - latest - 1]) + } + } + + /// Triggers the advancing of the host chain, by extending the history of blocks (or headers). 
+ pub fn advance_host_chain_height(&mut self) { + let latest_block = self.history.last().expect("history cannot be empty"); + let new_block = ::HostBlock::generate_block( + self.host_chain_id.clone(), + self.host_chain_type, + latest_block.height().increment().revision_height, + latest_block.timestamp().add(self.block_time).unwrap(), + ); + + // Append the new header at the tip of the history. + if self.history.len() >= self.max_history_size { + // History is full, we rotate and replace the tip with the new header. + self.history.rotate_left(1); + self.history[self.max_history_size - 1] = new_block; + } else { + // History is not full yet. + self.history.push(new_block); + } + } + + /// A datagram passes from the relayer to the IBC module (on host chain). + /// Alternative method to `Ics18Context::send` that does not exercise any serialization. + /// Used in testing the Ics18 algorithms, hence this may return a Ics18Error. + pub fn deliver(&mut self, msg: Ics26Envelope>) -> Result<(), Ics26Error> { + dispatch(self, msg)?; + // Create a new block. + self.advance_host_chain_height(); + Ok(()) + } + + /// Validates this context. Should be called after the context is mutated by a test. + pub fn validate(&self) -> Result<(), String> { + // Check that the number of entries is not higher than window size. + if self.history.len() > self.max_history_size { + return Err("too many entries".to_string()); + } + + // Check the content of the history. + if !self.history.is_empty() { + // Get the highest block. + let lh = &self.history[self.history.len() - 1]; + // Check latest is properly updated with highest header height. + if lh.height() != self.latest_height() { + return Err("latest height is not updated".to_string()); + } + } + + // Check that headers in the history are in sequential order. 
+ for i in 1..self.history.len() { + let ph = &self.history[i - 1]; + let h = &self.history[i]; + if ph.height().increment() != h.height() { + return Err("headers in history not sequential".to_string()); + } + } + Ok(()) + } + + pub fn add_port(&mut self, port_id: PortId) { + let module_id = ModuleId::new(format!("module{}", port_id).into()).unwrap(); + self.ibc_store.lock().unwrap().port_to_module.insert(port_id, module_id); + } + + pub fn scope_port_to_module(&mut self, port_id: PortId, module_id: ModuleId) { + self.ibc_store.lock().unwrap().port_to_module.insert(port_id, module_id); + } + + pub fn consensus_states(&self, client_id: &ClientId) -> Vec> { + self.ibc_store.lock().unwrap().clients[client_id] + .consensus_states + .iter() + .map(|(k, v)| AnyConsensusStateWithHeight { height: *k, consensus_state: v.clone() }) + .collect() + } + + pub fn latest_client_states(&self, client_id: &ClientId) -> C::AnyClientState { + self.ibc_store.lock().unwrap().clients[client_id] + .client_state + .as_ref() + .unwrap() + .clone() + } + + pub fn latest_consensus_states( + &self, + client_id: &ClientId, + height: &Height, + ) -> C::AnyConsensusState { + self.ibc_store.lock().unwrap().clients[client_id] + .consensus_states + .get(height) + .unwrap() + .clone() + } + + #[inline] + pub fn latest_height(&self) -> Height { + self.history.last().expect("history cannot be empty").height() + } + + pub fn ibc_store_share(&self) -> Arc>> { + self.ibc_store.clone() + } +} + +impl MockContext +where + C::AnyClientState: From, + C::AnyConsensusState: From, +{ + /// Associates a client record to this context. + /// Given a client id and a height, registers a new client in the context and also associates + /// to this client a mock client state and a mock consensus state for height `height`. The type + /// of this client is implicitly assumed to be Mock. 
+ pub fn with_client(self, client_id: &ClientId, height: Height) -> Self { + self.with_client_parametrized( + client_id, + height, + Some(MockClientState::client_type()), + Some(height), + ) + } + + /// Similar to `with_client`, this function associates a client record to this context, but + /// additionally permits to parametrize two details of the client. If `client_type` is None, + /// then the client will have type Mock, otherwise the specified type. If + /// `consensus_state_height` is None, then the client will be initialized with a consensus + /// state matching the same height as the client state (`client_state_height`). + pub fn with_client_parametrized( + self, + client_id: &ClientId, + client_state_height: Height, + client_type: Option, + consensus_state_height: Option, + ) -> Self { + let cs_height = consensus_state_height.unwrap_or(client_state_height); + + let client_type = client_type.unwrap_or(MockClientState::client_type()); + let (client_state, consensus_state) = match client_type.clone() { + // If it's a mock client, create the corresponding mock states. + client_type if client_type == MockClientState::client_type() => ( + Some(MockClientState::new(MockHeader::new(client_state_height).into()).into()), + MockConsensusState::new(MockHeader::new(cs_height)).into(), + ), + _ => unimplemented!(), + }; + let consensus_states = vec![(cs_height, consensus_state)].into_iter().collect(); + + debug!("consensus states: {:?}", consensus_states); + + let client_record = MockClientRecord { client_type, client_state, consensus_states }; + self.ibc_store.lock().unwrap().clients.insert(client_id.clone(), client_record); + self + } } /// An object that stores all IBC related data. #[derive(Clone, Debug, Default)] -pub struct MockIbcStore { - /// The set of all clients, indexed by their id. - pub clients: BTreeMap, +pub struct MockIbcStore { + /// The set of all clients, indexed by their id. 
+ pub clients: BTreeMap>, - /// Tracks the processed time for clients header updates - pub client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, + /// Tracks the processed time for clients header updates + pub client_processed_times: BTreeMap<(ClientId, Height), Timestamp>, - /// Tracks the processed height for the clients - pub client_processed_heights: BTreeMap<(ClientId, Height), Height>, + /// Tracks the processed height for the clients + pub client_processed_heights: BTreeMap<(ClientId, Height), Height>, - /// Counter for the client identifiers, necessary for `increase_client_counter` and the - /// `client_counter` methods. - pub client_ids_counter: u64, + /// Counter for the client identifiers, necessary for `increase_client_counter` and the + /// `client_counter` methods. + pub client_ids_counter: u64, - /// Association between client ids and connection ids. - pub client_connections: BTreeMap, + /// Association between client ids and connection ids. + pub client_connections: BTreeMap, - /// All the connections in the store. - pub connections: BTreeMap, + /// All the connections in the store. + pub connections: BTreeMap, - /// Counter for connection identifiers (see `increase_connection_counter`). - pub connection_ids_counter: u64, + /// Counter for connection identifiers (see `increase_connection_counter`). + pub connection_ids_counter: u64, - /// Association between connection ids and channel ids. - pub connection_channels: BTreeMap>, + /// Association between connection ids and channel ids. + pub connection_channels: BTreeMap>, - /// Counter for channel identifiers (see `increase_channel_counter`). - pub channel_ids_counter: u64, + /// Counter for channel identifiers (see `increase_channel_counter`). + pub channel_ids_counter: u64, - /// All the channels in the store. TODO Make new key PortId X ChanneId - pub channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, + /// All the channels in the store. 
TODO Make new key PortId X ChanneId + pub channels: BTreeMap<(PortId, ChannelId), ChannelEnd>, - /// Tracks the sequence number for the next packet to be sent. - pub next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be sent. + pub next_sequence_send: BTreeMap<(PortId, ChannelId), Sequence>, - /// Tracks the sequence number for the next packet to be received. - pub next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be received. + pub next_sequence_recv: BTreeMap<(PortId, ChannelId), Sequence>, - /// Tracks the sequence number for the next packet to be acknowledged. - pub next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, + /// Tracks the sequence number for the next packet to be acknowledged. + pub next_sequence_ack: BTreeMap<(PortId, ChannelId), Sequence>, - pub packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, + pub packet_acknowledgement: BTreeMap<(PortId, ChannelId, Sequence), AcknowledgementCommitment>, - /// Maps ports to the the module that owns it - pub port_to_module: BTreeMap, + /// Maps ports to the the module that owns it + pub port_to_module: BTreeMap, - /// Constant-size commitments to packets data fields - pub packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, + /// Constant-size commitments to packets data fields + pub packet_commitment: BTreeMap<(PortId, ChannelId, Sequence), PacketCommitment>, - // Used by unordered channel - pub packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, + // Used by unordered channel + pub packet_receipt: BTreeMap<(PortId, ChannelId, Sequence), Receipt>, } #[derive(Default)] pub struct MockRouterBuilder(MockRouter); impl RouterBuilder for MockRouterBuilder { - type Router = MockRouter; - - fn add_route(mut self, module_id: ModuleId, module: impl Module) -> Result { - match self.0 .0.insert(module_id, 
Arc::new(module)) { - None => Ok(self), - Some(_) => Err("Duplicate module_id".to_owned()), - } - } - - fn build(self) -> Self::Router { - self.0 - } + type Router = MockRouter; + + fn add_route(mut self, module_id: ModuleId, module: impl Module) -> Result { + match self.0 .0.insert(module_id, Arc::new(module)) { + None => Ok(self), + Some(_) => Err("Duplicate module_id".to_owned()), + } + } + + fn build(self) -> Self::Router { + self.0 + } } -#[derive(Clone, Debug, Default)] +#[derive(Clone, Default)] pub struct MockRouter(BTreeMap>); +impl Debug for MockRouter { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + write!(f, "{:?}", self.0.keys().collect::>()) + } +} + impl Router for MockRouter { - fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module> { - self.0.get_mut(module_id.borrow()).and_then(Arc::get_mut) - } + fn get_route_mut(&mut self, module_id: &impl Borrow) -> Option<&mut dyn Module> { + self.0.get_mut(module_id.borrow()).and_then(Arc::get_mut) + } - fn has_route(&self, module_id: &impl Borrow) -> bool { - self.0.get(module_id.borrow()).is_some() - } + fn has_route(&self, module_id: &impl Borrow) -> bool { + self.0.get(module_id.borrow()).is_some() + } } -impl Ics26Context for MockContext { - type Router = MockRouter; +impl ReaderContext for MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ +} - fn router(&self) -> &Self::Router { - &self.router - } +impl Ics26Context for MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + type Router = MockRouter; + + fn router(&self) -> &Self::Router { + &self.router + } + + fn router_mut(&mut self) -> &mut Self::Router { + &mut self.router + } +} - fn router_mut(&mut self) -> &mut Self::Router { - &mut self.router - } +impl PortReader 
for MockContext { + fn lookup_module_by_port(&self, port_id: &PortId) -> Result { + match self.ibc_store.lock().unwrap().port_to_module.get(port_id) { + Some(mod_id) => Ok(mod_id.clone()), + None => Err(Ics05Error::unknown_port(port_id.clone())), + } + } } -impl PortReader for MockContext { - fn lookup_module_by_port(&self, port_id: &PortId) -> Result { - match self.ibc_store.lock().unwrap().port_to_module.get(port_id) { - Some(mod_id) => Ok(mod_id.clone()), - None => Err(Ics05Error::unknown_port(port_id.clone())), - } - } +impl ChannelReader for MockContext { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Ics04Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels( + &self, + cid: &ConnectionId, + ) -> Result, Ics04Error> { + match self.ibc_store.lock().unwrap().connection_channels.get(cid) { + Some(pcid) => Ok(pcid.clone()), + None => Err(Ics04Error::missing_channel()), + } + } + + fn get_next_sequence_send( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_send.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_recv.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_recv_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_ack( + &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_ack.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Ics04Error::missing_next_ack_seq(port_channel_id.clone())), + } + } + + fn get_packet_commitment( + &self, + key: 
&(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_commitment.get(key) { + Some(commitment) => Ok(commitment.clone()), + None => Err(Ics04Error::packet_commitment_not_found(key.2)), + } + } + + fn get_packet_receipt( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_receipt.get(key) { + Some(receipt) => Ok(receipt.clone()), + None => Err(Ics04Error::packet_receipt_not_found(key.2)), + } + } + + fn get_packet_acknowledgement( + &self, + key: &(PortId, ChannelId, Sequence), + ) -> Result { + match self.ibc_store.lock().unwrap().packet_acknowledgement.get(key) { + Some(ack) => Ok(ack.clone()), + None => Err(Ics04Error::packet_acknowledgement_not_found(key.2)), + } + } + + fn hash(&self, value: Vec) -> Vec { + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + .client_processed_times + .get(&(client_id.clone(), height)) + { + Some(time) => Ok(*time), + None => Err(Ics04Error::processed_time_not_found(client_id.clone(), height)), + } + } + + fn client_update_height( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self + .ibc_store + .lock() + .unwrap() + .client_processed_heights + .get(&(client_id.clone(), height)) + { + Some(height) => Ok(*height), + None => Err(Ics04Error::processed_height_not_found(client_id.clone(), height)), + } + } + + fn channel_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().channel_ids_counter) + } + + fn max_expected_time_per_block(&self) -> Duration { + self.block_time + } } -impl ChannelReader for MockContext { - fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.ibc_store.lock().unwrap().channels.get(pcid) { - Some(channel_end) => Ok(channel_end.clone()), - None => Err(Ics04Error::channel_not_found(pcid.0.clone(), pcid.1)), - } - } - - fn 
connection_end(&self, cid: &ConnectionId) -> Result { - ConnectionReader::connection_end(self, cid).map_err(Ics04Error::ics03_connection) - } - - fn connection_channels( - &self, - cid: &ConnectionId, - ) -> Result, Ics04Error> { - match self.ibc_store.lock().unwrap().connection_channels.get(cid) { - Some(pcid) => Ok(pcid.clone()), - None => Err(Ics04Error::missing_channel()), - } - } - - fn client_state(&self, client_id: &ClientId) -> Result { - ClientReader::client_state(self, client_id) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - ClientReader::consensus_state(self, client_id, height) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_send - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_send_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_recv( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_recv - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_recv_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_ack( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_ack - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Ics04Error::missing_next_ack_seq(port_channel_id.clone())), - } - } - - fn get_packet_commitment( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self.ibc_store.lock().unwrap().packet_commitment.get(key) { - Some(commitment) => Ok(commitment.clone()), - None => 
Err(Ics04Error::packet_commitment_not_found(key.2)), - } - } - - fn get_packet_receipt( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self.ibc_store.lock().unwrap().packet_receipt.get(key) { - Some(receipt) => Ok(receipt.clone()), - None => Err(Ics04Error::packet_receipt_not_found(key.2)), - } - } - - fn get_packet_acknowledgement( - &self, - key: &(PortId, ChannelId, Sequence), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .packet_acknowledgement - .get(key) - { - Some(ack) => Ok(ack.clone()), - None => Err(Ics04Error::packet_acknowledgement_not_found(key.2)), - } - } - - fn hash(&self, value: Vec) -> Vec { - sha2::Sha256::digest(value).to_vec() - } - - fn host_height(&self) -> Height { - self.latest_height() - } - - fn host_timestamp(&self) -> Timestamp { - ClientReader::host_timestamp(self) - } - - fn host_consensus_state(&self, height: Height) -> Result { - ConnectionReader::host_consensus_state(self, height).map_err(Ics04Error::ics03_connection) - } - - fn pending_host_consensus_state(&self) -> Result { - ClientReader::pending_host_consensus_state(self) - .map_err(|e| Ics04Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn client_update_time( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .client_processed_times - .get(&(client_id.clone(), height)) - { - Some(time) => Ok(*time), - None => Err(Ics04Error::processed_time_not_found( - client_id.clone(), - height, - )), - } - } - - fn client_update_height( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .client_processed_heights - .get(&(client_id.clone(), height)) - { - Some(height) => Ok(*height), - None => Err(Ics04Error::processed_height_not_found( - client_id.clone(), - height, - )), - } - } - - fn channel_counter(&self) -> Result { - Ok(self.ibc_store.lock().unwrap().channel_ids_counter) - } - - fn 
max_expected_time_per_block(&self) -> Duration { - self.block_time - } +impl ChannelKeeper for MockContext { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: PacketCommitment, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_commitment.insert(key, commitment); + Ok(()) + } + + fn store_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ack_commitment: AcknowledgementCommitment, + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .packet_acknowledgement + .insert(key, ack_commitment); + Ok(()) + } + + fn delete_packet_acknowledgement( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_acknowledgement.remove(&key); + Ok(()) + } + + fn store_connection_channels( + &mut self, + cid: ConnectionId, + port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .connection_channels + .entry(cid) + .or_insert_with(Vec::new) + .push(port_channel_id.clone()); + Ok(()) + } + + fn store_channel( + &mut self, + port_channel_id: (PortId, ChannelId), + channel_end: &ChannelEnd, + ) -> Result<(), Ics04Error> { + self.ibc_store + .lock() + .unwrap() + .channels + .insert(port_channel_id, channel_end.clone()); + Ok(()) + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().next_sequence_send.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().next_sequence_recv.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_ack( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Ics04Error> { + 
self.ibc_store.lock().unwrap().next_sequence_ack.insert(port_channel_id, seq); + Ok(()) + } + + fn increase_channel_counter(&mut self) { + self.ibc_store.lock().unwrap().channel_ids_counter += 1; + } + + fn delete_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_commitment.remove(&key); + Ok(()) + } + + fn store_packet_receipt( + &mut self, + key: (PortId, ChannelId, Sequence), + receipt: Receipt, + ) -> Result<(), Ics04Error> { + self.ibc_store.lock().unwrap().packet_receipt.insert(key, receipt); + Ok(()) + } + + fn store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } + + fn store_recv_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Ics04Error> { + Ok(()) + } } -impl ChannelKeeper for MockContext { - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .insert(key, commitment); - Ok(()) - } - - fn store_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ack_commitment: AcknowledgementCommitment, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_acknowledgement - .insert(key, ack_commitment); - Ok(()) - } - - fn delete_packet_acknowledgement( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_acknowledgement - .remove(&key); - Ok(()) - } - - fn store_connection_channels( - &mut self, - cid: ConnectionId, - port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .connection_channels - .entry(cid) - .or_insert_with(Vec::new) - 
.push(port_channel_id.clone()); - Ok(()) - } - - fn store_channel( - &mut self, - port_channel_id: (PortId, ChannelId), - channel_end: &ChannelEnd, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .channels - .insert(port_channel_id, channel_end.clone()); - Ok(()) - } - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_send - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_recv( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_recv - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_ack( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_ack - .insert(port_channel_id, seq); - Ok(()) - } - - fn increase_channel_counter(&mut self) { - self.ibc_store.lock().unwrap().channel_ids_counter += 1; - } - - fn delete_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .remove(&key); - Ok(()) - } - - fn store_packet_receipt( - &mut self, - key: (PortId, ChannelId, Sequence), - receipt: Receipt, - ) -> Result<(), Ics04Error> { - self.ibc_store - .lock() - .unwrap() - .packet_receipt - .insert(key, receipt); - Ok(()) - } +impl ConnectionReader for MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } 
+ } + + fn host_oldest_height(&self) -> Height { + // history must be non-empty, so `self.history[0]` is valid + self.history[0].height() + } + + fn commitment_prefix(&self) -> CommitmentPrefix { + CommitmentPrefix::try_from(b"mock".to_vec()).unwrap() + } + + fn connection_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().connection_ids_counter) + } } -impl ConnectionReader for MockContext { - fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.ibc_store.lock().unwrap().connections.get(cid) { - Some(connection_end) => Ok(connection_end.clone()), - None => Err(Ics03Error::connection_not_found(cid.clone())), - } - } - - fn client_state(&self, client_id: &ClientId) -> Result { - // Forward method call to the Ics2 Client-specific method. - ClientReader::client_state(self, client_id).map_err(Ics03Error::ics02_client) - } - - fn host_current_height(&self) -> Height { - self.latest_height() - } - - fn host_oldest_height(&self) -> Height { - // history must be non-empty, so `self.history[0]` is valid - self.history[0].height() - } - - fn commitment_prefix(&self) -> CommitmentPrefix { - CommitmentPrefix::try_from(b"mock".to_vec()).unwrap() - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - // Forward method call to the Ics2Client-specific method. 
- self.consensus_state(client_id, height) - .map_err(Ics03Error::ics02_client) - } - - fn host_consensus_state(&self, height: Height) -> Result { - ClientReader::host_consensus_state(self, height).map_err(Ics03Error::ics02_client) - } - - fn connection_counter(&self) -> Result { - Ok(self.ibc_store.lock().unwrap().connection_ids_counter) - } +impl ConnectionKeeper for MockContext { + fn store_connection( + &mut self, + connection_id: ConnectionId, + connection_end: &ConnectionEnd, + ) -> Result<(), Ics03Error> { + self.ibc_store + .lock() + .unwrap() + .connections + .insert(connection_id, connection_end.clone()); + Ok(()) + } + + fn store_connection_to_client( + &mut self, + connection_id: ConnectionId, + client_id: &ClientId, + ) -> Result<(), Ics03Error> { + self.ibc_store + .lock() + .unwrap() + .client_connections + .insert(client_id.clone(), connection_id); + Ok(()) + } + + fn increase_connection_counter(&mut self) { + self.ibc_store.lock().unwrap().connection_ids_counter += 1; + } } -impl ConnectionKeeper for MockContext { - fn store_connection( - &mut self, - connection_id: ConnectionId, - connection_end: &ConnectionEnd, - ) -> Result<(), Ics03Error> { - self.ibc_store - .lock() - .unwrap() - .connections - .insert(connection_id, connection_end.clone()); - Ok(()) - } - - fn store_connection_to_client( - &mut self, - connection_id: ConnectionId, - client_id: &ClientId, - ) -> Result<(), Ics03Error> { - self.ibc_store - .lock() - .unwrap() - .client_connections - .insert(client_id.clone(), connection_id); - Ok(()) - } - - fn increase_connection_counter(&mut self) { - self.ibc_store.lock().unwrap().connection_ids_counter += 1; - } +impl ClientReader for MockContext +where + C::AnyClientMessage: TryFrom + Into + From, + C::AnyClientState: Eq + TryFrom + Into, + C::AnyConsensusState: + Eq + TryFrom + Into + From + 'static, +{ + fn client_type(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + 
Some(client_record) => Ok(client_record.client_type.clone()), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + }, + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + } + } + + fn host_client_type(&self) -> String { + MockClientState::client_type().to_owned() + } + + /// Search for the lowest consensus state higher than `height`. + fn next_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Ics02Error> { + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .get(client_id) + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; + + // Get the consensus state heights and sort them in ascending order. + let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); + heights.sort(); + + // Search for next state. + for h in heights { + if h > height { + // unwrap should never happen, as the consensus state for h must exist + return Ok(Some(client_record.consensus_states.get(&h).unwrap().clone())); + } + } + Ok(None) + } + + /// Search for the highest consensus state lower than `height`. 
+ fn prev_consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result, Ics02Error> { + let ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store + .clients + .get(client_id) + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; + + // Get the consensus state heights and sort them in descending order. + let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); + heights.sort_by(|a, b| b.cmp(a)); + + // Search for previous state. + for h in heights { + if h < height { + // unwrap should never happen, as the consensus state for h must exist + return Ok(Some(client_record.consensus_states.get(&h).unwrap().clone())); + } + } + Ok(None) + } + + fn host_height(&self) -> Height { + self.latest_height() + } + + fn host_timestamp(&self) -> Timestamp { + self.history + .last() + .expect("history cannot be empty") + .timestamp() + .add(self.block_time) + .unwrap() + } + + fn host_consensus_state( + &self, + height: Height, + _proof: Option>, + ) -> Result { + match self.host_block(height) { + Some(block_ref) => Ok(block_ref.clone().into()), + None => Err(Ics02Error::missing_local_consensus_state(height)), + } + } + + fn client_counter(&self) -> Result { + Ok(self.ibc_store.lock().unwrap().client_ids_counter) + } } -impl ClientReader for MockContext { - fn client_type(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => Ok(client_record.client_type), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - } - - fn client_state(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => client_record - .client_state - .clone() - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - } - - fn consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) 
-> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => match client_record.consensus_states.get(&height) { - Some(consensus_state) => Ok(consensus_state.clone()), - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - }, - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - } - } - - /// Search for the lowest consensus state higher than `height`. - fn next_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .get(client_id) - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; - - // Get the consensus state heights and sort them in ascending order. - let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); - heights.sort(); - - // Search for next state. - for h in heights { - if h > height { - // unwrap should never happen, as the consensus state for h must exist - return Ok(Some( - client_record.consensus_states.get(&h).unwrap().clone(), - )); - } - } - Ok(None) - } - - /// Search for the highest consensus state lower than `height`. - fn prev_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result, Ics02Error> { - let ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .get(client_id) - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone()))?; - - // Get the consensus state heights and sort them in descending order. - let mut heights: Vec = client_record.consensus_states.keys().cloned().collect(); - heights.sort_by(|a, b| b.cmp(a)); - - // Search for previous state. 
- for h in heights { - if h < height { - // unwrap should never happen, as the consensus state for h must exist - return Ok(Some( - client_record.consensus_states.get(&h).unwrap().clone(), - )); - } - } - Ok(None) - } - - fn host_height(&self) -> Height { - self.latest_height() - } - - fn host_timestamp(&self) -> Timestamp { - self.history - .last() - .expect("history cannot be empty") - .timestamp() - .add(self.block_time) - .unwrap() - } - - fn host_consensus_state(&self, height: Height) -> Result { - match self.host_block(height) { - Some(block_ref) => Ok(block_ref.clone().into()), - None => Err(Ics02Error::missing_local_consensus_state(height)), - } - } - - fn pending_host_consensus_state(&self) -> Result { - Err(Ics02Error::missing_local_consensus_state(Height::zero())) - } - - fn client_counter(&self) -> Result { - Ok(self.ibc_store.lock().unwrap().client_ids_counter) - } +#[derive(Debug, Eq, PartialEq, Clone, Default)] +pub struct MockClientTypes; + +pub trait HostBlockType: ClientTypes +where + Self: Clone + Debug + Eq, +{ + type HostBlock: HostBlock + Debug + Clone; +} + +impl ClientTypes for MockClientTypes { + type AnyClientMessage = AnyClientMessage; + type AnyClientState = AnyClientState; + type AnyConsensusState = AnyConsensusState; + type ClientDef = AnyClient; } -impl ClientKeeper for MockContext { - fn store_client_type( - &mut self, - client_id: ClientId, - client_type: ClientType, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .entry(client_id) - .or_insert(MockClientRecord { - client_type, - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record.client_type = client_type; - Ok(()) - } - - fn store_client_state( - &mut self, - client_id: ClientId, - client_state: AnyClientState, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - 
.entry(client_id) - .or_insert(MockClientRecord { - client_type: client_state.client_type(), - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record.client_state = Some(client_state); - Ok(()) - } - - fn store_consensus_state( - &mut self, - client_id: ClientId, - height: Height, - consensus_state: AnyConsensusState, - ) -> Result<(), Ics02Error> { - let mut ibc_store = self.ibc_store.lock().unwrap(); - let client_record = ibc_store - .clients - .entry(client_id) - .or_insert(MockClientRecord { - client_type: ClientType::Mock, - consensus_states: Default::default(), - client_state: Default::default(), - }); - - client_record - .consensus_states - .insert(height, consensus_state); - Ok(()) - } - - fn increase_client_counter(&mut self) { - self.ibc_store.lock().unwrap().client_ids_counter += 1 - } - - fn store_update_time( - &mut self, - client_id: ClientId, - height: Height, - timestamp: Timestamp, - ) -> Result<(), Ics02Error> { - let _ = self - .ibc_store - .lock() - .unwrap() - .client_processed_times - .insert((client_id, height), timestamp); - Ok(()) - } - - fn store_update_height( - &mut self, - client_id: ClientId, - height: Height, - host_height: Height, - ) -> Result<(), Ics02Error> { - let _ = self - .ibc_store - .lock() - .unwrap() - .client_processed_heights - .insert((client_id, height), host_height); - Ok(()) - } +impl HostBlockType for MockClientTypes { + type HostBlock = MockHostBlock; } -impl Ics18Context for MockContext { - fn query_latest_height(&self) -> Height { - self.host_current_height() - } - - fn query_client_full_state(&self, client_id: &ClientId) -> Option { - // Forward call to Ics2. - ClientReader::client_state(self, client_id).ok() - } - - fn query_latest_header(&self) -> Option { - let block_ref = self.host_block(self.host_current_height()); - block_ref.cloned().map(Into::into) - } - - fn send(&mut self, msgs: Vec) -> Result, Ics18Error> { - // Forward call to Ics26 delivery method. 
- let mut all_events = vec![]; - for msg in msgs { - let MsgReceipt { mut events, .. } = - deliver(self, msg).map_err(Ics18Error::transaction_failed)?; - all_events.append(&mut events); - } - self.advance_host_chain_height(); // Advance chain height - Ok(all_events) - } - - fn signer(&self) -> Signer { - "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() - } +impl ClientTypes for MockContext { + type AnyClientMessage = C::AnyClientMessage; + type AnyClientState = C::AnyClientState; + type AnyConsensusState = C::AnyConsensusState; + type ClientDef = C::ClientDef; +} + +impl ClientKeeper for MockContext { + fn store_client_type( + &mut self, + client_id: ClientId, + client_type: ClientType, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type: client_type.clone(), + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.client_type = client_type; + Ok(()) + } + + fn store_client_state( + &mut self, + client_id: ClientId, + client_state: C::AnyClientState, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type: client_state.client_type(), + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.client_state = Some(client_state); + Ok(()) + } + + fn store_consensus_state( + &mut self, + client_id: ClientId, + height: Height, + consensus_state: C::AnyConsensusState, + ) -> Result<(), Ics02Error> { + let mut ibc_store = self.ibc_store.lock().unwrap(); + let client_record = ibc_store.clients.entry(client_id).or_insert(MockClientRecord { + client_type: MockClientState::client_type(), + consensus_states: Default::default(), + client_state: Default::default(), + }); + + client_record.consensus_states.insert(height, 
consensus_state); + Ok(()) + } + + fn increase_client_counter(&mut self) { + self.ibc_store.lock().unwrap().client_ids_counter += 1 + } + + fn store_update_time( + &mut self, + client_id: ClientId, + height: Height, + timestamp: Timestamp, + ) -> Result<(), Ics02Error> { + let _ = self + .ibc_store + .lock() + .unwrap() + .client_processed_times + .insert((client_id, height), timestamp); + Ok(()) + } + + fn store_update_height( + &mut self, + client_id: ClientId, + height: Height, + host_height: Height, + ) -> Result<(), Ics02Error> { + let _ = self + .ibc_store + .lock() + .unwrap() + .client_processed_heights + .insert((client_id, height), host_height); + Ok(()) + } + + fn validate_self_client(&self, _client_state: &C::AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use test_log::test; - - use alloc::str::FromStr; - - use crate::core::ics04_channel::channel::{Counterparty, Order}; - use crate::core::ics04_channel::error::Error; - use crate::core::ics04_channel::packet::Packet; - use crate::core::ics04_channel::Version; - use crate::core::ics24_host::identifier::ChainId; - use crate::core::ics24_host::identifier::{ChannelId, ConnectionId, PortId}; - use crate::core::ics26_routing::context::{ - Acknowledgement, Module, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, - RouterBuilder, - }; - use crate::mock::context::MockContext; - use crate::mock::context::MockRouterBuilder; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::signer::Signer; - use crate::test_utils::get_dummy_bech32_account; - use crate::Height; - - #[test] - fn test_history_manipulation() { - pub struct Test { - name: String, - ctx: MockContext, - } - let cv = 1; // The version to use for all chains. 
- - let tests: Vec = vec![ - Test { - name: "Empty history, small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 2, - Height::new(cv, 1), - ), - }, - Test { - name: "[Synthetic TM host] Empty history, small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mocksgaia".to_string(), cv), - HostType::SyntheticTendermint, - 2, - Height::new(cv, 1), - ), - }, - Test { - name: "Large pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 30, - Height::new(cv, 2), - ), - }, - Test { - name: "[Synthetic TM host] Large pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mocksgaia".to_string(), cv), - HostType::SyntheticTendermint, - 30, - Height::new(cv, 2), - ), - }, - Test { - name: "Small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 3, - Height::new(cv, 30), - ), - }, - Test { - name: "[Synthetic TM host] Small pruning window".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 3, - Height::new(cv, 30), - ), - }, - Test { - name: "Small pruning window, small starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 3, - Height::new(cv, 2), - ), - }, - Test { - name: "[Synthetic TM host] Small pruning window, small starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 3, - Height::new(cv, 2), - ), - }, - Test { - name: "Large pruning window, large starting height".to_string(), - ctx: MockContext::new( - ChainId::new("mockgaia".to_string(), cv), - HostType::Mock, - 50, - Height::new(cv, 2000), - ), - }, - Test { - name: "[Synthetic TM host] Large pruning window, large starting height".to_string(), - ctx: MockContext::new( - 
ChainId::new("mockgaia".to_string(), cv), - HostType::SyntheticTendermint, - 50, - Height::new(cv, 2000), - ), - }, - ]; - - for mut test in tests { - // All tests should yield a valid context after initialization. - assert!( - test.ctx.validate().is_ok(), - "failed in test {} while validating context {:?}", - test.name, - test.ctx - ); - - let current_height = test.ctx.latest_height(); - - // After advancing the chain's height, the context should still be valid. - test.ctx.advance_host_chain_height(); - assert!( - test.ctx.validate().is_ok(), - "failed in test {} while validating context {:?}", - test.name, - test.ctx - ); - - let next_height = current_height.increment(); - assert_eq!( - test.ctx.latest_height(), - next_height, - "failed while increasing height for context {:?}", - test.ctx - ); - if current_height > Height::new(cv, 0) { - assert_eq!( - test.ctx.host_block(current_height).unwrap().height(), - current_height, - "failed while fetching height {:?} of context {:?}", - current_height, - test.ctx - ); - } - } - } - - #[test] - fn test_router() { - #[derive(Default)] - struct MockAck(Vec); - - impl AsRef<[u8]> for MockAck { - fn as_ref(&self) -> &[u8] { - self.0.as_slice() - } - } - - impl Acknowledgement for MockAck {} - - #[derive(Debug, Default)] - struct FooModule { - counter: usize, - } - - impl Module for FooModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } - - fn on_recv_packet( - &self, - _output: &mut ModuleOutputBuilder, - _packet: &Packet, - _relayer: &Signer, - ) -> OnRecvPacketAck { - OnRecvPacketAck::Successful( - Box::new(MockAck::default()), - Box::new(|module| { - let module = module.downcast_mut::().unwrap(); - module.counter += 1; - Ok(()) - }), - ) - } - } - - 
#[derive(Debug, Default)] - struct BarModule; - - impl Module for BarModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } - } - - let r = MockRouterBuilder::default() - .add_route("foomodule".parse().unwrap(), FooModule::default()) - .unwrap() - .add_route("barmodule".parse().unwrap(), BarModule::default()) - .unwrap() - .build(); - - let mut ctx = MockContext::new( - ChainId::new("mockgaia".to_string(), 1), - HostType::Mock, - 1, - Height::new(1, 1), - ) - .with_router(r); - - let mut on_recv_packet_result = |module_id: &'static str| { - let module_id = ModuleId::from_str(module_id).unwrap(); - let m = ctx.router.get_route_mut(&module_id).unwrap(); - let result = m.on_recv_packet( - &mut ModuleOutputBuilder::new(), - &Packet::default(), - &get_dummy_bech32_account().parse().unwrap(), - ); - (module_id, result) - }; - - let results = vec![ - on_recv_packet_result("foomodule"), - on_recv_packet_result("barmodule"), - ]; - results - .into_iter() - .filter_map(|(mid, result)| match result { - OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { - Some((mid, write_fn)) - } - _ => None, - }) - .for_each(|(mid, write_fn)| { - write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()).unwrap() - }); - } + use test_log::test; + + use alloc::str::FromStr; + + use crate::{ + core::{ + ics04_channel::{ + channel::{Counterparty, Order}, + error::Error, + packet::Packet, + Version, + }, + ics24_host::identifier::{ChainId, ChannelId, ConnectionId, PortId}, + ics26_routing::context::{ + Acknowledgement, Module, ModuleId, ModuleOutputBuilder, OnRecvPacketAck, Router, + RouterBuilder, + }, + }, + mock::{ + context::{MockClientTypes, MockContext, MockRouterBuilder}, + 
host::{HostBlock, MockHostType}, + }, + prelude::*, + signer::Signer, + test_utils::get_dummy_bech32_account, + Height, + }; + + #[test] + fn test_history_manipulation() { + pub struct Test { + name: String, + ctx: MockContext, + } + let cv = 1; // The version to use for all chains. + + let tests: Vec = vec![ + Test { + name: "Empty history, small pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 2, + Height::new(cv, 1), + ), + }, + Test { + name: "Large pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 30, + Height::new(cv, 2), + ), + }, + Test { + name: "Small pruning window".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 3, + Height::new(cv, 30), + ), + }, + Test { + name: "Small pruning window, small starting height".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 3, + Height::new(cv, 2), + ), + }, + Test { + name: "Large pruning window, large starting height".to_string(), + ctx: MockContext::new( + ChainId::new("mockgaia".to_string(), cv), + MockHostType::Mock, + 50, + Height::new(cv, 2000), + ), + }, + ]; + + for mut test in tests { + // All tests should yield a valid context after initialization. + assert!( + test.ctx.validate().is_ok(), + "failed in test {} while validating context {:?}", + test.name, + test.ctx + ); + + let current_height = test.ctx.latest_height(); + + // After advancing the chain's height, the context should still be valid. 
+ test.ctx.advance_host_chain_height(); + assert!( + test.ctx.validate().is_ok(), + "failed in test {} while validating context {:?}", + test.name, + test.ctx + ); + + let next_height = current_height.increment(); + assert_eq!( + test.ctx.latest_height(), + next_height, + "failed while increasing height for context {:?}", + test.ctx + ); + if current_height > Height::new(cv, 0) { + assert_eq!( + test.ctx.host_block(current_height).unwrap().height(), + current_height, + "failed while fetching height {:?} of context {:?}", + current_height, + test.ctx + ); + } + } + } + + #[test] + fn test_router() { + #[derive(Default)] + struct MockAck(Vec); + + impl AsRef<[u8]> for MockAck { + fn as_ref(&self) -> &[u8] { + self.0.as_slice() + } + } + + impl Acknowledgement for MockAck {} + + #[derive(Debug, Default)] + struct FooModule { + counter: usize, + } + + impl Module for FooModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } + + fn on_recv_packet( + &self, + _output: &mut ModuleOutputBuilder, + _packet: &Packet, + _relayer: &Signer, + ) -> OnRecvPacketAck { + OnRecvPacketAck::Successful( + Box::new(MockAck::default()), + Box::new(|module| { + let module = module.downcast_mut::().unwrap(); + module.counter += 1; + Ok(()) + }), + ) + } + } + + #[derive(Debug, Default)] + struct BarModule; + + impl Module for BarModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, + _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } + } + + let r = MockRouterBuilder::default() + 
.add_route("foomodule".parse().unwrap(), FooModule::default()) + .unwrap() + .add_route("barmodule".parse().unwrap(), BarModule::default()) + .unwrap() + .build(); + + let mut ctx = MockContext::::new( + ChainId::new("mockgaia".to_string(), 1), + MockHostType::Mock, + 1, + Height::new(1, 1), + ) + .with_router(r); + + let mut on_recv_packet_result = |module_id: &'static str| { + let module_id = ModuleId::from_str(module_id).unwrap(); + let m = ctx.router.get_route_mut(&module_id).unwrap(); + let result = m.on_recv_packet( + &mut ModuleOutputBuilder::new(), + &Packet::default(), + &get_dummy_bech32_account().parse().unwrap(), + ); + (module_id, result) + }; + + let results = vec![on_recv_packet_result("foomodule"), on_recv_packet_result("barmodule")]; + results + .into_iter() + .filter_map(|(mid, result)| match result { + OnRecvPacketAck::Nil(write_fn) | OnRecvPacketAck::Successful(_, write_fn) => { + Some((mid, write_fn)) + }, + _ => None, + }) + .for_each(|(mid, write_fn)| { + write_fn(ctx.router.get_route_mut(&mid).unwrap().as_any_mut()).unwrap() + }); + } +} + +#[cfg(test)] +impl Default for ClientId { + fn default() -> Self { + Self::new("07-tendermint", 0).unwrap() + } +} + +#[cfg(test)] +impl Default for Attributes { + fn default() -> Self { + Attributes { + height: Height::default(), + client_id: Default::default(), + client_type: "07-tendermint".to_owned(), + consensus_height: Height::default(), + } + } } diff --git a/modules/src/mock/header.rs b/modules/src/mock/header.rs index 381975ae48..89654618ae 100644 --- a/modules/src/mock/header.rs +++ b/modules/src/mock/header.rs @@ -1,109 +1,187 @@ +use ibc_proto::google::protobuf::Any; use serde_derive::{Deserialize, Serialize}; use tendermint_proto::Protobuf; use ibc_proto::ibc::mock::Header as RawMockHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_type::ClientType; -use crate::core::ics02_client::error::Error; -use 
crate::core::ics02_client::header::AnyHeader; -use crate::core::ics02_client::header::Header; -use crate::mock::client_state::MockConsensusState; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::mock::host::{HostBlock, MockHostBlock}; +use crate::mock::misbehaviour::{MockMisbehaviour, MOCK_MISBEHAVIOUR_TYPE_URL}; +use crate::{ + core::ics02_client::{client_message::ClientMessage, error::Error}, + mock::client_state::{AnyConsensusState, MockConsensusState}, + timestamp::Timestamp, + Height, +}; + +pub const MOCK_HEADER_TYPE_URL: &str = "/ibc.mock.Header"; + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, ClientMessage)] +#[allow(clippy::large_enum_variant)] +pub enum AnyClientMessage { + #[ibc(proto_url = "MOCK_HEADER_TYPE_URL")] + Mock(MockClientMessage), +} + +impl From for AnyClientMessage { + fn from(block: MockHostBlock) -> Self { + Self::Mock(MockClientMessage::Header(MockHeader::new(block.height()))) + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +pub enum MockClientMessage { + Header(MockHeader), + Misbehaviour(MockMisbehaviour), +} + +impl From for MockClientMessage { + fn from(header: MockHeader) -> Self { + Self::Header(header) + } +} + +impl MockClientMessage { + pub fn header(&self) -> MockHeader { + match self { + MockClientMessage::Header(header) => header.clone(), + MockClientMessage::Misbehaviour(misbehaviour) => misbehaviour.header1.clone(), + } + } + + pub fn height(&self) -> Height { + match self { + MockClientMessage::Header(header) => header.height, + MockClientMessage::Misbehaviour(misbehaviour) => misbehaviour.header1.height, + } + } +} + +impl ClientMessage for MockClientMessage { + fn encode_to_vec(&self) -> Vec { + unreachable!() + } +} + +impl Protobuf for AnyClientMessage {} + +impl TryFrom for AnyClientMessage { + type Error = Error; + + fn try_from(value: Any) -> Result { + match value.type_url.as_str() { + MOCK_HEADER_TYPE_URL => Ok(Self::Mock(MockClientMessage::Header( + 
MockHeader::decode_vec(&value.value).map_err(Error::decode_raw_header)?, + ))), + MOCK_MISBEHAVIOUR_TYPE_URL => Ok(Self::Mock(MockClientMessage::Misbehaviour( + MockMisbehaviour::decode_vec(&value.value) + .map_err(Error::decode_raw_misbehaviour)?, + ))), + _ => Err(Error::unknown_consensus_state_type(value.type_url)), + } + } +} + +impl From for Any { + fn from(client_msg: AnyClientMessage) -> Self { + match client_msg { + AnyClientMessage::Mock(MockClientMessage::Header(header)) => { + Any { type_url: MOCK_HEADER_TYPE_URL.to_string(), value: header.encode_vec() } + }, + AnyClientMessage::Mock(MockClientMessage::Misbehaviour(misbehaviour)) => Any { + type_url: MOCK_MISBEHAVIOUR_TYPE_URL.to_string(), + value: misbehaviour.encode_vec(), + }, + } + } +} #[derive(Copy, Clone, Default, Debug, Deserialize, PartialEq, Eq, Serialize)] pub struct MockHeader { - pub height: Height, - pub timestamp: Timestamp, + pub height: Height, + pub timestamp: Timestamp, } impl Protobuf for MockHeader {} impl TryFrom for MockHeader { - type Error = Error; + type Error = Error; - fn try_from(raw: RawMockHeader) -> Result { - Ok(MockHeader { - height: raw.height.ok_or_else(Error::missing_raw_header)?.into(), + fn try_from(raw: RawMockHeader) -> Result { + Ok(MockHeader { + height: raw.height.ok_or_else(Error::missing_raw_header)?.into(), - timestamp: Timestamp::from_nanoseconds(raw.timestamp) - .map_err(Error::invalid_packet_timestamp)?, - }) - } + timestamp: Timestamp::from_nanoseconds(raw.timestamp) + .map_err(Error::invalid_packet_timestamp)?, + }) + } } impl From for RawMockHeader { - fn from(value: MockHeader) -> Self { - RawMockHeader { - height: Some(value.height.into()), - timestamp: value.timestamp.nanoseconds(), - } - } + fn from(value: MockHeader) -> Self { + RawMockHeader { + height: Some(value.height.into()), + timestamp: value.timestamp.nanoseconds(), + } + } } impl MockHeader { - pub fn height(&self) -> Height { - self.height - } + pub fn height(&self) -> Height { + 
self.height + } - pub fn new(height: Height) -> Self { - Self { - height, - timestamp: Timestamp::now(), - } - } + pub fn new(height: Height) -> Self { + Self { height, timestamp: Timestamp::now() } + } - pub fn with_timestamp(self, timestamp: Timestamp) -> Self { - Self { timestamp, ..self } - } -} + pub fn timestamp(&self) -> Timestamp { + self.timestamp + } -impl From for AnyHeader { - fn from(mh: MockHeader) -> Self { - Self::Mock(mh) - } + pub fn with_timestamp(self, timestamp: Timestamp) -> Self { + Self { timestamp, ..self } + } } -impl Header for MockHeader { - fn client_type(&self) -> ClientType { - ClientType::Mock - } - - fn height(&self) -> Height { - self.height - } +impl From for AnyClientMessage { + fn from(client_msg: MockClientMessage) -> Self { + Self::Mock(client_msg) + } +} - fn timestamp(&self) -> Timestamp { - self.timestamp - } +impl From for AnyClientMessage { + fn from(header: MockHeader) -> Self { + Self::Mock(MockClientMessage::Header(header)) + } +} - fn wrap_any(self) -> AnyHeader { - AnyHeader::Mock(self) - } +impl From for AnyClientMessage { + fn from(misbehaviour: MockMisbehaviour) -> Self { + Self::Mock(MockClientMessage::Misbehaviour(misbehaviour)) + } } impl From for AnyConsensusState { - fn from(h: MockHeader) -> Self { - AnyConsensusState::Mock(MockConsensusState::new(h)) - } + fn from(h: MockHeader) -> Self { + AnyConsensusState::Mock(MockConsensusState::new(h)) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn encode_any() { - let header = MockHeader::new(Height::new(1, 10)).with_timestamp(Timestamp::none()); - let bytes = header.wrap_any().encode_vec().unwrap(); - - assert_eq!( - &bytes, - &[ - 10, 16, 47, 105, 98, 99, 46, 109, 111, 99, 107, 46, 72, 101, 97, 100, 101, 114, 18, - 6, 10, 4, 8, 1, 16, 10 - ] - ); - } + use super::*; + + #[test] + fn encode_any() { + let header = MockHeader::new(Height::new(1, 10)).with_timestamp(Timestamp::none()); + let bytes = AnyClientMessage::from(header).encode_vec(); + + 
assert_eq!( + &bytes, + &[ + 10, 16, 47, 105, 98, 99, 46, 109, 111, 99, 107, 46, 72, 101, 97, 100, 101, 114, 18, + 6, 10, 4, 8, 1, 16, 10 + ] + ); + } } diff --git a/modules/src/mock/host.rs b/modules/src/mock/host.rs index d94b90f893..f0f21f32a6 100644 --- a/modules/src/mock/host.rs +++ b/modules/src/mock/host.rs @@ -1,124 +1,95 @@ //! Host chain types and methods, used by context mock. -use tendermint_testgen::light_block::TmLightBlock; -use tendermint_testgen::{Generator, LightBlock as TestgenLightBlock}; - -use crate::clients::ics07_tendermint::consensus_state::ConsensusState as TMConsensusState; -use crate::clients::ics07_tendermint::header::Header as TMHeader; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::header::AnyHeader; -use crate::core::ics24_host::identifier::ChainId; -use crate::mock::header::MockHeader; -use crate::prelude::*; -use crate::timestamp::Timestamp; -use crate::Height; +use crate::{ + core::ics24_host::identifier::ChainId, + mock::{client_state::AnyConsensusState, header::MockHeader}, + prelude::*, + timestamp::Timestamp, + Height, +}; +use core::fmt::Debug; /// Defines the different types of host chains that a mock context can emulate. /// The variants are as follows: /// - `Mock` defines that the context history consists of `MockHeader` blocks. /// - `SyntheticTendermint`: the context has synthetically-generated Tendermint (light) blocks. /// See also the `HostBlock` enum to get more insights into the underlying block type. -#[derive(Clone, Debug, Copy)] -pub enum HostType { - Mock, - SyntheticTendermint, +#[derive(Clone, Debug, PartialEq, Eq, Copy)] +pub enum MockHostType { + Mock, +} + +impl Default for MockHostType { + fn default() -> Self { + MockHostType::Mock + } } /// Depending on `HostType` (the type of host chain underlying a context mock), this enum defines /// the type of blocks composing the history of the host chain. 
#[derive(Clone, Debug)] -pub enum HostBlock { - Mock(MockHeader), - SyntheticTendermint(Box), +pub enum MockHostBlock { + Mock(MockHeader), } -impl HostBlock { - /// Returns the height of a block. - pub fn height(&self) -> Height { - match self { - HostBlock::Mock(header) => header.height(), - HostBlock::SyntheticTendermint(light_block) => Height::new( - ChainId::chain_version(light_block.signed_header.header.chain_id.as_str()), - light_block.signed_header.header.height.value(), - ), - } - } +pub trait HostBlock { + type HostType: Debug + Default + Copy; - /// Returns the timestamp of a block. - pub fn timestamp(&self) -> Timestamp { - match self { - HostBlock::Mock(header) => header.timestamp, - HostBlock::SyntheticTendermint(light_block) => { - light_block.signed_header.header.time.into() - } - } - } + fn height(&self) -> Height; + fn timestamp(&self) -> Timestamp; + fn generate_block( + chain_id: ChainId, + chain_type: Self::HostType, + height: u64, + timestamp: Timestamp, + ) -> Self; +} - /// Generates a new block at `height` for the given chain identifier and chain type. - pub fn generate_block( - chain_id: ChainId, - chain_type: HostType, - height: u64, - timestamp: Timestamp, - ) -> HostBlock { - match chain_type { - HostType::Mock => HostBlock::Mock(MockHeader { - height: Height::new(chain_id.version(), height), - timestamp, - }), - HostType::SyntheticTendermint => HostBlock::SyntheticTendermint(Box::new( - Self::generate_tm_block(chain_id, height, timestamp), - )), - } - } +impl HostBlock for MockHostBlock { + type HostType = MockHostType; - pub fn generate_tm_block(chain_id: ChainId, height: u64, timestamp: Timestamp) -> TmLightBlock { - TestgenLightBlock::new_default_with_time_and_chain_id( - chain_id.to_string(), - timestamp.into_tm_time().unwrap(), - height, - ) - .generate() - .unwrap() - } -} + /// Returns the height of a block. 
+ fn height(&self) -> Height { + match self { + MockHostBlock::Mock(header) => header.height(), + } + } -impl From for AnyConsensusState { - fn from(light_block: TmLightBlock) -> Self { - let cs = TMConsensusState::from(light_block.signed_header.header); - AnyConsensusState::Tendermint(cs) - } -} + /// Returns the timestamp of a block. + fn timestamp(&self) -> Timestamp { + match self { + MockHostBlock::Mock(header) => header.timestamp, + } + } -impl From for AnyConsensusState { - fn from(any_block: HostBlock) -> Self { - match any_block { - HostBlock::Mock(mock_header) => mock_header.into(), - HostBlock::SyntheticTendermint(light_block) => (*light_block).into(), - } - } + /// Generates a new block at `height` for the given chain identifier and chain type. + fn generate_block( + chain_id: ChainId, + chain_type: Self::HostType, + height: u64, + timestamp: Timestamp, + ) -> MockHostBlock { + match chain_type { + MockHostType::Mock => MockHostBlock::Mock(MockHeader { + height: Height::new(chain_id.version(), height), + timestamp, + }), + } + } } -impl From for AnyHeader { - fn from(any_block: HostBlock) -> Self { - match any_block { - HostBlock::Mock(mock_header) => mock_header.into(), - HostBlock::SyntheticTendermint(light_block_box) => { - // Conversion from TMLightBlock to AnyHeader - AnyHeader::Tendermint((*light_block_box).into()) - } - } - } +impl From for AnyConsensusState { + fn from(any_block: MockHostBlock) -> Self { + match any_block { + MockHostBlock::Mock(mock_header) => mock_header.into(), + } + } } -impl From for TMHeader { - fn from(light_block: TmLightBlock) -> Self { - // TODO: This conversion is incorrect for `trusted_height` and `trusted_validator_set`. 
- TMHeader { - signed_header: light_block.signed_header, - validator_set: light_block.validators, - trusted_height: Default::default(), - trusted_validator_set: light_block.next_validators, - } - } -} +// impl From for AnyClientMessage { +// fn from(any_block: MockHostBlock) -> Self { +// match any_block { +// MockHostBlock::Mock(mock_header) => mock_header.into(), +// } +// } +// } diff --git a/modules/src/mock/misbehaviour.rs b/modules/src/mock/misbehaviour.rs index 1b4d015178..a4cf7a325a 100644 --- a/modules/src/mock/misbehaviour.rs +++ b/modules/src/mock/misbehaviour.rs @@ -4,59 +4,53 @@ use tendermint_proto::Protobuf; use ibc_proto::ibc::mock::Misbehaviour as RawMisbehaviour; -use crate::core::ics02_client::error::Error; -use crate::core::ics02_client::misbehaviour::AnyMisbehaviour; -use crate::core::ics24_host::identifier::ClientId; -use crate::mock::header::MockHeader; -use crate::Height; - -#[derive(Clone, Debug, PartialEq)] -pub struct Misbehaviour { - pub client_id: ClientId, - pub header1: MockHeader, - pub header2: MockHeader, +use crate::{ + core::{ics02_client::error::Error, ics24_host::identifier::ClientId}, + mock::header::MockHeader, +}; + +pub const MOCK_MISBEHAVIOUR_TYPE_URL: &str = "/ibc.mock.Misbehavior"; + +#[derive(Clone, Debug, PartialEq, Protobuf)] +#[allow(clippy::large_enum_variant)] +pub enum AnyMisbehaviour { + #[ibc(proto_url = "MOCK_MISBEHAVIOUR_TYPE_URL")] + Mock(MockMisbehaviour), } -impl crate::core::ics02_client::misbehaviour::Misbehaviour for Misbehaviour { - fn client_id(&self) -> &ClientId { - &self.client_id - } - - fn height(&self) -> Height { - self.header1.height() - } +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] +pub struct MockMisbehaviour { + pub client_id: ClientId, + pub header1: MockHeader, + pub header2: MockHeader, +} - fn wrap_any(self) -> AnyMisbehaviour { - AnyMisbehaviour::Mock(self) - } +impl MockMisbehaviour { + fn encode_to_vec(&self) -> Vec { + self.encode_vec() + } } -impl Protobuf for 
Misbehaviour {} - -impl TryFrom for Misbehaviour { - type Error = Error; - - fn try_from(raw: RawMisbehaviour) -> Result { - Ok(Self { - client_id: Default::default(), - header1: raw - .header1 - .ok_or_else(Error::missing_raw_misbehaviour)? - .try_into()?, - header2: raw - .header2 - .ok_or_else(Error::missing_raw_misbehaviour)? - .try_into()?, - }) - } +impl Protobuf for MockMisbehaviour {} + +impl TryFrom for MockMisbehaviour { + type Error = Error; + + fn try_from(raw: RawMisbehaviour) -> Result { + Ok(Self { + client_id: Default::default(), + header1: raw.header1.ok_or_else(Error::missing_raw_misbehaviour)?.try_into()?, + header2: raw.header2.ok_or_else(Error::missing_raw_misbehaviour)?.try_into()?, + }) + } } -impl From for RawMisbehaviour { - fn from(value: Misbehaviour) -> Self { - RawMisbehaviour { - client_id: value.client_id.to_string(), - header1: Some(value.header1.into()), - header2: Some(value.header2.into()), - } - } +impl From for RawMisbehaviour { + fn from(value: MockMisbehaviour) -> Self { + RawMisbehaviour { + client_id: value.client_id.to_string(), + header1: Some(value.header1.into()), + header2: Some(value.header2.into()), + } + } } diff --git a/modules/src/prelude.rs b/modules/src/prelude.rs index bdce26208e..a973303b0d 100644 --- a/modules/src/prelude.rs +++ b/modules/src/prelude.rs @@ -2,14 +2,17 @@ pub use core::prelude::v1::*; // Re-export according to alloc::prelude::v1 because it is not yet stabilized // https://doc.rust-lang.org/src/alloc/prelude/v1.rs.html -pub use alloc::borrow::ToOwned; -pub use alloc::boxed::Box; -pub use alloc::string::{String, ToString}; -pub use alloc::vec::Vec; +pub use alloc::{ + borrow::ToOwned, + boxed::Box, + string::{String, ToString}, + vec::Vec, +}; -pub use alloc::format; -pub use alloc::vec; +pub use alloc::{format, vec}; // Those are exported by default in the std prelude in Rust 2021 -pub use core::convert::{TryFrom, TryInto}; -pub use core::iter::FromIterator; +pub use core::{ + convert::{TryFrom, 
TryInto}, + iter::FromIterator, +}; diff --git a/modules/src/proofs.rs b/modules/src/proofs.rs index 30e84aa726..b8d285b9a1 100644 --- a/modules/src/proofs.rs +++ b/modules/src/proofs.rs @@ -1,17 +1,16 @@ use serde::Serialize; -use crate::core::ics23_commitment::commitment::CommitmentProofBytes; -use crate::Height; +use crate::{core::ics23_commitment::commitment::CommitmentProofBytes, Height}; use flex_error::define_error; define_error! { - #[derive(Debug, PartialEq, Eq)] - ProofError { - ZeroHeight - | _ | { format_args!("proof height cannot be zero") }, - EmptyProof - | _ | { format_args!("proof cannot be empty") }, - } + #[derive(Debug, PartialEq, Eq)] + ProofError { + ZeroHeight + | _ | { format_args!("proof height cannot be zero") }, + EmptyProof + | _ | { format_args!("proof cannot be empty") }, + } } /// Structure comprising proofs in a message. Proofs are typically present in messages for @@ -19,93 +18,84 @@ define_error! { /// handshake, as well as for ICS4 packets, timeouts, and acknowledgements. #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct Proofs { - object_proof: CommitmentProofBytes, - client_proof: Option, - consensus_proof: Option, - /// Currently used for proof_close for MsgTimeoutOnCLose where object_proof is proof_unreceived - other_proof: Option, - /// Height for the commitment root for proving the proofs above. - /// When creating these proofs, the chain is queried at `height-1`. - height: Height, + object_proof: CommitmentProofBytes, + client_proof: Option, + consensus_proof: Option, + /// Currently used for proof_close for MsgTimeoutOnCLose where object_proof is proof_unreceived + other_proof: Option, + /// Height for the commitment root for proving the proofs above. + /// When creating these proofs, the chain is queried at `height-1`. 
+ height: Height, } impl Proofs { - pub fn new( - object_proof: CommitmentProofBytes, - client_proof: Option, - consensus_proof: Option, - other_proof: Option, - height: Height, - ) -> Result { - if height.is_zero() { - return Err(ProofError::zero_height()); - } + pub fn new( + object_proof: CommitmentProofBytes, + client_proof: Option, + consensus_proof: Option, + other_proof: Option, + height: Height, + ) -> Result { + if height.is_zero() { + return Err(ProofError::zero_height()) + } - Ok(Self { - object_proof, - client_proof, - consensus_proof, - other_proof, - height, - }) - } + Ok(Self { object_proof, client_proof, consensus_proof, other_proof, height }) + } - /// Getter for the consensus_proof field of this proof. Intuitively, this is a proof that a - /// client on the source chain stores a consensus state for the destination chain. - pub fn consensus_proof(&self) -> Option { - self.consensus_proof.clone() - } + /// Getter for the consensus_proof field of this proof. Intuitively, this is a proof that a + /// client on the source chain stores a consensus state for the destination chain. + pub fn consensus_proof(&self) -> Option { + self.consensus_proof.clone() + } - /// Getter for the height field of this proof (i.e., the consensus height where this proof was - /// created). - pub fn height(&self) -> Height { - self.height - } + /// Getter for the height field of this proof (i.e., the consensus height where this proof was + /// created). + pub fn height(&self) -> Height { + self.height + } - /// Getter for the object-specific proof (e.g., proof for connection state or channel state). - pub fn object_proof(&self) -> &CommitmentProofBytes { - &self.object_proof - } + /// Getter for the object-specific proof (e.g., proof for connection state or channel state). + pub fn object_proof(&self) -> &CommitmentProofBytes { + &self.object_proof + } - /// Getter for the client_proof. 
- pub fn client_proof(&self) -> &Option { - &self.client_proof - } + /// Getter for the client_proof. + pub fn client_proof(&self) -> &Option { + &self.client_proof + } - /// Getter for the other_proof. - pub fn other_proof(&self) -> &Option { - &self.other_proof - } + /// Getter for the other_proof. + pub fn other_proof(&self) -> &Option { + &self.other_proof + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize)] pub struct ConsensusProof { - proof: CommitmentProofBytes, - height: Height, + proof: CommitmentProofBytes, + height: Height, } impl ConsensusProof { - pub fn new( - consensus_proof: CommitmentProofBytes, - consensus_height: Height, - ) -> Result { - if consensus_height.is_zero() { - return Err(ProofError::zero_height()); - } + pub fn new( + consensus_proof: CommitmentProofBytes, + consensus_height: Height, + ) -> Result { + if consensus_height.is_zero() { + return Err(ProofError::zero_height()) + } - Ok(Self { - proof: consensus_proof, - height: consensus_height, - }) - } + Ok(Self { proof: consensus_proof, height: consensus_height }) + } - /// Getter for the height field of this consensus proof. - pub fn height(&self) -> Height { - self.height - } + /// Getter for the height field of this consensus proof. + pub fn height(&self) -> Height { + self.height + } - /// Getter for the proof (CommitmentProof) field of this consensus proof. - pub fn proof(&self) -> &CommitmentProofBytes { - &self.proof - } + /// Getter for the proof (CommitmentProof) field of this consensus proof. 
+ pub fn proof(&self) -> &CommitmentProofBytes { + &self.proof + } } diff --git a/modules/src/query.rs b/modules/src/query.rs deleted file mode 100644 index 9b7f035c8d..0000000000 --- a/modules/src/query.rs +++ /dev/null @@ -1,20 +0,0 @@ -use tendermint::abci::transaction::Hash; - -use crate::core::ics02_client::client_consensus::QueryClientEventRequest; -use crate::core::ics04_channel::channel::QueryPacketEventDataRequest; - -/// Used for queries and not yet standardized in channel's query.proto -#[derive(Clone, Debug)] -pub enum QueryTxRequest { - Packet(QueryPacketEventDataRequest), - Client(QueryClientEventRequest), - Transaction(QueryTxHash), -} - -#[derive(Clone, Debug)] -pub enum QueryBlockRequest { - Packet(QueryPacketEventDataRequest), -} - -#[derive(Clone, Debug)] -pub struct QueryTxHash(pub Hash); diff --git a/modules/src/relayer/ics18_relayer/context.rs b/modules/src/relayer/ics18_relayer/context.rs deleted file mode 100644 index 30665b193a..0000000000 --- a/modules/src/relayer/ics18_relayer/context.rs +++ /dev/null @@ -1,34 +0,0 @@ -use crate::prelude::*; -use ibc_proto::google::protobuf::Any; - -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::header::AnyHeader; -use crate::events::IbcEvent; - -use crate::core::ics24_host::identifier::ClientId; -use crate::relayer::ics18_relayer::error::Error; -use crate::signer::Signer; -use crate::Height; -/// Trait capturing all dependencies (i.e., the context) which algorithms in ICS18 require to -/// relay packets between chains. This trait comprises the dependencies towards a single chain. -/// Most of the functions in this represent wrappers over the ABCI interface. -/// This trait mimics the `Chain` trait, but at a lower level of abstraction (no networking, header -/// types, light client, RPC client, etc.) -pub trait Ics18Context { - /// Returns the latest height of the chain. 
- fn query_latest_height(&self) -> Height; - - /// Returns this client state for the given `client_id` on this chain. - /// Wrapper over the `/abci_query?path=..` endpoint. - fn query_client_full_state(&self, client_id: &ClientId) -> Option; - - /// Returns the most advanced header of this chain. - fn query_latest_header(&self) -> Option; - - /// Interface that the relayer uses to submit a datagram to this chain. - /// One can think of this as wrapping around the `/broadcast_tx_commit` ABCI endpoint. - fn send(&mut self, msgs: Vec) -> Result, Error>; - - /// Temporary solution. Similar to `CosmosSDKChain::key_and_signer()` but simpler. - fn signer(&self) -> Signer; -} diff --git a/modules/src/relayer/ics18_relayer/error.rs b/modules/src/relayer/ics18_relayer/error.rs deleted file mode 100644 index e975de5a3a..0000000000 --- a/modules/src/relayer/ics18_relayer/error.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::core::ics24_host::identifier::ClientId; -use crate::core::ics26_routing::error::Error as RoutingError; -use crate::Height; -use flex_error::define_error; - -define_error! 
{ - Error { - ClientStateNotFound - { client_id: ClientId } - | e | { format_args!("client state on destination chain not found, (client id: {0})", e.client_id) }, - - ClientAlreadyUpToDate - { - client_id: ClientId, - source_height: Height, - destination_height: Height, - } - | e | { - format_args!("the client on destination chain is already up-to-date (client id: {0}, source height: {1}, dest height: {2})", - e.client_id, e.source_height, e.destination_height) - }, - - ClientAtHigherHeight - { - client_id: ClientId, - source_height: Height, - destination_height: Height, - } - | e | { - format_args!("the client on destination chain is at a higher height (client id: {0}, source height: {1}, dest height: {2})", - e.client_id, e.source_height, e.destination_height) - }, - - TransactionFailed - [ RoutingError ] - | _ | { "transaction processing by modules failed" }, - } -} diff --git a/modules/src/relayer/ics18_relayer/mod.rs b/modules/src/relayer/ics18_relayer/mod.rs deleted file mode 100644 index 254c69aa4e..0000000000 --- a/modules/src/relayer/ics18_relayer/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! ICS 18: Relayer contains utilities for testing `ibc` against the Hermes relayer. 
- -pub mod context; -pub mod error; -pub mod utils; diff --git a/modules/src/relayer/ics18_relayer/utils.rs b/modules/src/relayer/ics18_relayer/utils.rs deleted file mode 100644 index 34cd79cd7a..0000000000 --- a/modules/src/relayer/ics18_relayer/utils.rs +++ /dev/null @@ -1,210 +0,0 @@ -use crate::core::ics02_client::header::{AnyHeader, Header}; -use crate::core::ics02_client::msgs::update_client::MsgUpdateAnyClient; -use crate::core::ics02_client::msgs::ClientMsg; -use crate::core::ics24_host::identifier::ClientId; -use crate::relayer::ics18_relayer::context::Ics18Context; -use crate::relayer::ics18_relayer::error::Error; - -/// Builds a `ClientMsg::UpdateClient` for a client with id `client_id` running on the `dest` -/// context, assuming that the latest header on the source context is `src_header`. -pub fn build_client_update_datagram( - dest: &Ctx, - client_id: &ClientId, - src_header: AnyHeader, -) -> Result -where - Ctx: Ics18Context, -{ - // Check if client for ibc0 on ibc1 has been updated to latest height: - // - query client state on destination chain - let dest_client_state = dest - .query_client_full_state(client_id) - .ok_or_else(|| Error::client_state_not_found(client_id.clone()))?; - - let dest_client_latest_height = dest_client_state.latest_height(); - - if src_header.height() == dest_client_latest_height { - return Err(Error::client_already_up_to_date( - client_id.clone(), - src_header.height(), - dest_client_latest_height, - )); - }; - - if dest_client_latest_height > src_header.height() { - return Err(Error::client_at_higher_height( - client_id.clone(), - src_header.height(), - dest_client_latest_height, - )); - }; - - // Client on destination chain can be updated. 
- Ok(ClientMsg::UpdateClient(MsgUpdateAnyClient { - client_id: client_id.clone(), - header: src_header, - signer: dest.signer(), - })) -} - -#[cfg(test)] -mod tests { - use crate::core::ics02_client::client_type::ClientType; - use crate::core::ics02_client::header::{AnyHeader, Header}; - use crate::core::ics24_host::identifier::{ChainId, ClientId}; - use crate::core::ics26_routing::msgs::Ics26Envelope; - use crate::mock::context::MockContext; - use crate::mock::host::HostType; - use crate::prelude::*; - use crate::relayer::ics18_relayer::context::Ics18Context; - use crate::relayer::ics18_relayer::utils::build_client_update_datagram; - use crate::Height; - use test_log::test; - use tracing::debug; - - #[test] - /// Serves to test both ICS 26 `dispatch` & `build_client_update_datagram` functions. - /// Implements a "ping pong" of client update messages, so that two chains repeatedly - /// process a client update message and update their height in succession. - fn client_update_ping_pong() { - let chain_a_start_height = Height::new(1, 11); - let chain_b_start_height = Height::new(1, 20); - let client_on_b_for_a_height = Height::new(1, 10); // Should be smaller than `chain_a_start_height` - let client_on_a_for_b_height = Height::new(1, 20); // Should be smaller than `chain_b_start_height` - let num_iterations = 4; - - let client_on_a_for_b = ClientId::new(ClientType::Tendermint, 0).unwrap(); - let client_on_b_for_a = ClientId::new(ClientType::Mock, 0).unwrap(); - - // Create two mock contexts, one for each chain. - let mut ctx_a = MockContext::new( - ChainId::new("mockgaiaA".to_string(), 1), - HostType::Mock, - 5, - chain_a_start_height, - ) - .with_client_parametrized( - &client_on_a_for_b, - client_on_a_for_b_height, - Some(ClientType::Tendermint), // The target host chain (B) is synthetic TM. 
- Some(client_on_a_for_b_height), - ); - let mut ctx_b = MockContext::new( - ChainId::new("mockgaiaB".to_string(), 1), - HostType::SyntheticTendermint, - 5, - chain_b_start_height, - ) - .with_client_parametrized( - &client_on_b_for_a, - client_on_b_for_a_height, - Some(ClientType::Mock), // The target host chain is mock. - Some(client_on_b_for_a_height), - ); - - for _i in 0..num_iterations { - // Update client on chain B to latest height of A. - // - create the client update message with the latest header from A - let a_latest_header = ctx_a.query_latest_header().unwrap(); - assert_eq!( - a_latest_header.client_type(), - ClientType::Mock, - "Client type verification in header failed for context A (Mock); got {:?} but expected {:?}", - a_latest_header.client_type(), - ClientType::Mock - ); - - let client_msg_b_res = - build_client_update_datagram(&ctx_b, &client_on_b_for_a, a_latest_header); - - assert!( - client_msg_b_res.is_ok(), - "create_client_update failed for context destination {:?}, error: {:?}", - ctx_b, - client_msg_b_res - ); - - let client_msg_b = client_msg_b_res.unwrap(); - - // - send the message to B. We bypass ICS18 interface and call directly into - // MockContext `recv` method (to avoid additional serialization steps). - let dispatch_res_b = ctx_b.deliver(Ics26Envelope::Ics2Msg(client_msg_b)); - let validation_res = ctx_b.validate(); - assert!( - validation_res.is_ok(), - "context validation failed with error {:?} for context {:?}", - validation_res, - ctx_b - ); - - // Check if the update succeeded. - assert!( - dispatch_res_b.is_ok(), - "Dispatch failed for host chain b with error: {:?}", - dispatch_res_b - ); - let client_height_b = ctx_b - .query_client_full_state(&client_on_b_for_a) - .unwrap() - .latest_height(); - assert_eq!(client_height_b, ctx_a.query_latest_height()); - - // Update client on chain B to latest height of B. 
- // - create the client update message with the latest header from B - // The test uses LightClientBlock that does not store the trusted height - let b_latest_header = match ctx_b.query_latest_header().unwrap() { - AnyHeader::Tendermint(header) => { - let th = header.height(); - let mut hheader = header.clone(); - hheader.trusted_height = th.decrement().unwrap(); - hheader.wrap_any() - } - AnyHeader::Mock(header) => header.wrap_any(), - }; - - assert_eq!( - b_latest_header.client_type(), - ClientType::Tendermint, - "Client type verification in header failed for context B (TM); got {:?} but expected {:?}", - b_latest_header.client_type(), - ClientType::Tendermint - ); - - let client_msg_a_res = - build_client_update_datagram(&ctx_a, &client_on_a_for_b, b_latest_header); - - assert!( - client_msg_a_res.is_ok(), - "create_client_update failed for context destination {:?}, error: {:?}", - ctx_a, - client_msg_a_res - ); - - let client_msg_a = client_msg_a_res.unwrap(); - - debug!("client_msg_a = {:?}", client_msg_a); - - // - send the message to A - let dispatch_res_a = ctx_a.deliver(Ics26Envelope::Ics2Msg(client_msg_a)); - let validation_res = ctx_a.validate(); - assert!( - validation_res.is_ok(), - "context validation failed with error {:?} for context {:?}", - validation_res, - ctx_a - ); - - // Check if the update succeeded. - assert!( - dispatch_res_a.is_ok(), - "Dispatch failed for host chain a with error: {:?}", - dispatch_res_a - ); - let client_height_a = ctx_a - .query_client_full_state(&client_on_a_for_b) - .unwrap() - .latest_height(); - assert_eq!(client_height_a, ctx_b.query_latest_height()); - } - } -} diff --git a/modules/src/relayer/mod.rs b/modules/src/relayer/mod.rs deleted file mode 100644 index e88996bcd5..0000000000 --- a/modules/src/relayer/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Utilities for testing the `ibc` crate against the [Hermes IBC relayer][relayer-repo]. -//! -//! 
[relayer-repo]: https://github.com/informalsystems/ibc-rs/tree/master/relayer - -pub mod ics18_relayer; diff --git a/modules/src/serializers.rs b/modules/src/serializers.rs index bd6f346336..7cadbe3e43 100644 --- a/modules/src/serializers.rs +++ b/modules/src/serializers.rs @@ -1,38 +1,50 @@ -use serde::ser::{Serialize, Serializer}; +use crate::prelude::*; +use serde::{ + ser::{Serialize, Serializer}, + Deserialize, Deserializer, +}; use subtle_encoding::{Encoding, Hex}; pub fn ser_hex_upper(data: T, serializer: S) -> Result where - S: Serializer, - T: AsRef<[u8]>, + S: Serializer, + T: AsRef<[u8]>, { - let hex = Hex::upper_case().encode_to_string(data).unwrap(); - hex.serialize(serializer) + let hex = Hex::upper_case().encode_to_string(data).unwrap(); + hex.serialize(serializer) +} + +pub fn deser_hex_upper<'de, T, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, + T: AsRef<[u8]>, + T: From>, +{ + let hex = String::deserialize(deserializer)?; + let bytes = Hex::upper_case().decode(hex.as_bytes()).unwrap(); + Ok(bytes.into()) } pub mod serde_string { - use alloc::string::String; - use core::fmt::Display; - use core::str::FromStr; + use alloc::string::String; + use core::{fmt::Display, str::FromStr}; - use serde::{de, Deserialize, Deserializer, Serializer}; + use serde::{de, Deserialize, Deserializer, Serializer}; - pub fn serialize(value: &T, serializer: S) -> Result - where - T: Display, - S: Serializer, - { - serializer.collect_str(value) - } + pub fn serialize(value: &T, serializer: S) -> Result + where + T: Display, + S: Serializer, + { + serializer.collect_str(value) + } - pub fn deserialize<'de, T, D>(deserializer: D) -> Result - where - T: FromStr, - T::Err: Display, - D: Deserializer<'de>, - { - String::deserialize(deserializer)? 
- .parse() - .map_err(de::Error::custom) - } + pub fn deserialize<'de, T, D>(deserializer: D) -> Result + where + T: FromStr, + T::Err: Display, + D: Deserializer<'de>, + { + String::deserialize(deserializer)?.parse().map_err(de::Error::custom) + } } diff --git a/modules/src/signer.rs b/modules/src/signer.rs index 21c62bf116..febf31da3c 100644 --- a/modules/src/signer.rs +++ b/modules/src/signer.rs @@ -7,30 +7,30 @@ use flex_error::define_error; use serde::{Deserialize, Serialize}; define_error! { - #[derive(Debug, PartialEq, Eq)] - SignerError { - EmptySigner - | _ | { "signer cannot be empty" }, - } + #[derive(Debug, PartialEq, Eq)] + SignerError { + EmptySigner + | _ | { "signer cannot be empty" }, + } } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, Display)] pub struct Signer(String); impl FromStr for Signer { - type Err = SignerError; + type Err = SignerError; - fn from_str(s: &str) -> Result { - let s = s.to_string(); - if s.trim().is_empty() { - return Err(SignerError::empty_signer()); - } - Ok(Self(s)) - } + fn from_str(s: &str) -> Result { + let s = s.to_string(); + if s.trim().is_empty() { + return Err(SignerError::empty_signer()) + } + Ok(Self(s)) + } } impl AsRef for Signer { - fn as_ref(&self) -> &str { - self.0.as_str() - } + fn as_ref(&self) -> &str { + self.0.as_str() + } } diff --git a/modules/src/test.rs b/modules/src/test.rs index b6a9cb1f34..c9dc728b75 100644 --- a/modules/src/test.rs +++ b/modules/src/test.rs @@ -9,19 +9,19 @@ use serde::{de::DeserializeOwned, Serialize}; /// - that the two parsed structs are equal according to their `PartialEq` impl pub fn test_serialization_roundtrip(json_data: &str) where - T: Debug + Serialize + DeserializeOwned, + T: Debug + Serialize + DeserializeOwned, { - let parsed0 = serde_json::from_str::(json_data); - assert!(parsed0.is_ok()); - let parsed0 = parsed0.unwrap(); + let parsed0 = serde_json::from_str::(json_data); + assert!(parsed0.is_ok()); + let parsed0 = 
parsed0.unwrap(); - let serialized = serde_json::to_string(&parsed0); - assert!(serialized.is_ok()); - let serialized = serialized.unwrap(); + let serialized = serde_json::to_string(&parsed0); + assert!(serialized.is_ok()); + let serialized = serialized.unwrap(); - let parsed1 = serde_json::from_str::(&serialized); - assert!(parsed1.is_ok()); + let parsed1 = serde_json::from_str::(&serialized); + assert!(parsed1.is_ok()); - // TODO - fix PartialEq bound issue in AbciQuery - //assert_eq!(parsed0, parsed1); + // TODO - fix PartialEq bound issue in AbciQuery + //assert_eq!(parsed0, parsed1); } diff --git a/modules/src/test_utils.rs b/modules/src/test_utils.rs index 9dbbe0596b..1691a51c9a 100644 --- a/modules/src/test_utils.rs +++ b/modules/src/test_utils.rs @@ -1,384 +1,648 @@ -use std::sync::{Arc, Mutex}; -use std::time::Duration; - +use std::{ + sync::{Arc, Mutex}, + time::Duration, +}; + +use crate::{ + applications::transfer::{ + context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}, + error::Error as Ics20Error, + PrefixedCoin, + }, + core::{ + ics02_client::{ + client_state::ClientType, + context::{ClientKeeper, ClientReader}, + error::Error as Ics02Error, + }, + ics03_connection::{ + connection::ConnectionEnd, context::ConnectionReader, error::Error as Ics03Error, + }, + ics04_channel::{ + channel::{ChannelEnd, Counterparty, Order}, + commitment::{AcknowledgementCommitment, PacketCommitment}, + context::{ChannelKeeper, ChannelReader}, + error::Error, + packet::{Receipt, Sequence}, + Version, + }, + ics05_port::{context::PortReader, error::Error as PortError}, + ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}, + ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder, ReaderContext}, + }, + mock::context::{HostBlockType, MockIbcStore}, + prelude::*, + signer::Signer, + timestamp::Timestamp, + Height, +}; + +use crate::core::ics02_client::context::ClientTypes; use tendermint::{block, consensus, evidence, public_key::Algorithm}; 
-use crate::applications::transfer::context::{BankKeeper, Ics20Context, Ics20Keeper, Ics20Reader}; -use crate::applications::transfer::{error::Error as Ics20Error, PrefixedCoin}; -use crate::core::ics02_client::client_consensus::AnyConsensusState; -use crate::core::ics02_client::client_state::AnyClientState; -use crate::core::ics02_client::error::Error as Ics02Error; -use crate::core::ics03_connection::connection::ConnectionEnd; -use crate::core::ics03_connection::error::Error as Ics03Error; -use crate::core::ics04_channel::channel::{ChannelEnd, Counterparty, Order}; -use crate::core::ics04_channel::commitment::{AcknowledgementCommitment, PacketCommitment}; -use crate::core::ics04_channel::context::{ChannelKeeper, ChannelReader}; -use crate::core::ics04_channel::error::Error; -use crate::core::ics04_channel::packet::{Receipt, Sequence}; -use crate::core::ics04_channel::Version; -use crate::core::ics05_port::context::PortReader; -use crate::core::ics05_port::error::Error as PortError; -use crate::core::ics24_host::identifier::{ChannelId, ClientId, ConnectionId, PortId}; -use crate::core::ics26_routing::context::{Module, ModuleId, ModuleOutputBuilder}; -use crate::mock::context::MockIbcStore; -use crate::prelude::*; -use crate::signer::Signer; -use crate::timestamp::Timestamp; -use crate::Height; - // Needed in mocks. 
pub fn default_consensus_params() -> consensus::Params { - consensus::Params { - block: block::Size { - max_bytes: 22020096, - max_gas: -1, - time_iota_ms: 1000, - }, - evidence: evidence::Params { - max_age_num_blocks: 100000, - max_age_duration: evidence::Duration(core::time::Duration::new(48 * 3600, 0)), - max_bytes: 0, - }, - validator: consensus::params::ValidatorParams { - pub_key_types: vec![Algorithm::Ed25519], - }, - version: Some(consensus::params::VersionParams::default()), - } + consensus::Params { + block: block::Size { max_bytes: 22020096, max_gas: -1, time_iota_ms: 1000 }, + evidence: evidence::Params { + max_age_num_blocks: 100000, + max_age_duration: evidence::Duration(core::time::Duration::new(48 * 3600, 0)), + max_bytes: 0, + }, + validator: consensus::params::ValidatorParams { pub_key_types: vec![Algorithm::Ed25519] }, + version: Some(consensus::params::VersionParams::default()), + } } pub fn get_dummy_proof() -> Vec { - "Y29uc2Vuc3VzU3RhdGUvaWJjb25lY2xpZW50LzIy" - .as_bytes() - .to_vec() + "Y29uc2Vuc3VzU3RhdGUvaWJjb25lY2xpZW50LzIy".as_bytes().to_vec() } pub fn get_dummy_account_id() -> Signer { - "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() + "0CDA3F47EF3C4906693B170EF650EB968C5F4B2C".parse().unwrap() } pub fn get_dummy_bech32_account() -> String { - "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".to_string() + "cosmos1wxeyh7zgn4tctjzs0vtqpc6p5cxq5t2muzl7ng".to_string() +} + +#[derive(Debug, Clone)] +pub struct DummyTransferModule { + ibc_store: Arc>>, +} + +impl PartialEq for DummyTransferModule { + fn eq(&self, _other: &Self) -> bool { + false + } +} + +impl Eq for DummyTransferModule {} + +impl DummyTransferModule { + pub fn new(ibc_store: Arc>>) -> Self { + Self { ibc_store } + } +} + +impl Module for DummyTransferModule { + fn on_chan_open_try( + &mut self, + _output: &mut ModuleOutputBuilder, + _order: Order, + _connection_hops: &[ConnectionId], + _port_id: &PortId, + _channel_id: &ChannelId, + _counterparty: &Counterparty, 
+ _version: &Version, + counterparty_version: &Version, + ) -> Result { + Ok(counterparty_version.clone()) + } +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct Crypto; +/* +impl HostFunctionsProvider for Crypto { + fn keccak_256(input: &[u8]) -> [u8; 32] { + keccak_256(input) + } + + fn secp256k1_ecdsa_recover_compressed( + signature: &[u8; 65], + value: &[u8; 32], + ) -> Option> { + sp_io::crypto::secp256k1_ecdsa_recover_compressed(signature, value) + .ok() + .map(|val| val.to_vec()) + } + + fn ed25519_verify(_signature: &[u8; 64], _msg: &[u8], _pubkey: &[u8]) -> bool { + true + } + + fn verify_membership_trie_proof( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + value: &[u8], + ) -> Result<(), Ics02Error> { + let item = vec![(key, Some(value))]; + sp_trie::verify_trie_proof::, _, _, _>( + &sp_core::H256::from_slice(root), + proof, + &item, + ) + .map_err(|e| panic!("{}", e)) + } + + fn verify_non_membership_trie_proof( + root: &[u8; 32], + proof: &[Vec], + key: &[u8], + ) -> Result<(), Ics02Error> { + let item: Vec<(&[u8], Option<&[u8]>)> = vec![(key, None)]; + sp_trie::verify_trie_proof::, _, _, _>( + &sp_core::H256::from_slice(root), + proof, + &item, + ) + .map_err(|e| panic!("{}", e)) + } + + fn sha256_digest(data: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(data) + } + + fn sha2_256(message: &[u8]) -> [u8; 32] { + sp_io::hashing::sha2_256(message) + } + + fn sha2_512(message: &[u8]) -> [u8; 64] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn sha2_512_truncated(message: &[u8]) -> [u8; 32] { + use sha2::Digest; + let mut hasher = sha2::Sha512::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 32]; + res.copy_from_slice(&hash[..32]); + res + } + + fn sha3_512(message: &[u8]) -> [u8; 64] { + use sha3::Digest; + let mut hasher = sha3::Sha3_512::new(); + 
hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 64]; + res.copy_from_slice(&hash); + res + } + + fn ripemd160(message: &[u8]) -> [u8; 20] { + use ripemd::Digest; + let mut hasher = ripemd::Ripemd160::new(); + hasher.update(message); + let hash = hasher.finalize(); + let mut res = [0u8; 20]; + res.copy_from_slice(&hash); + res + } + + fn verify_timestamp_extrinsic( + root: &[u8; 32], + proof: &[Vec], + value: &[u8], + ) -> Result<(), Ics02Error> { + let root = sp_core::H256::from_slice(root); + let key = codec::Compact(0u32).encode(); + sp_io::trie::blake2_256_verify_proof( + root, + proof, + &key, + value, + sp_core::storage::StateVersion::V0, + ) + .then(|| ()) + .ok_or_else(|| { + Ics02Error::implementation_specific("timestamp verification failed".to_string()) + }) + } } + */ + +// implementation for ics23 +impl ics23::HostFunctionsProvider for Crypto { + fn sha2_256(_message: &[u8]) -> [u8; 32] { + unimplemented!() + } + + fn sha2_512(_message: &[u8]) -> [u8; 64] { + unimplemented!() + } -#[derive(Debug)] -pub struct DummyTransferModule { - ibc_store: Arc>, + fn sha2_512_truncated(_message: &[u8]) -> [u8; 32] { + unimplemented!() + } + + fn sha3_512(_message: &[u8]) -> [u8; 64] { + unimplemented!() + } + + fn ripemd160(_message: &[u8]) -> [u8; 20] { + unimplemented!() + } } -impl DummyTransferModule { - pub fn new(ibc_store: Arc>) -> Self { - Self { ibc_store } - } +impl Ics20Keeper for DummyTransferModule { + type AccountId = Signer; } -impl Module for DummyTransferModule { - fn on_chan_open_try( - &mut self, - _output: &mut ModuleOutputBuilder, - _order: Order, - _connection_hops: &[ConnectionId], - _port_id: &PortId, - _channel_id: &ChannelId, - _counterparty: &Counterparty, - _version: &Version, - counterparty_version: &Version, - ) -> Result { - Ok(counterparty_version.clone()) - } +impl ChannelKeeper for DummyTransferModule { + fn store_packet_commitment( + &mut self, + key: (PortId, ChannelId, Sequence), + commitment: 
PacketCommitment, + ) -> Result<(), Error> { + self.ibc_store.lock().unwrap().packet_commitment.insert(key, commitment); + Ok(()) + } + + fn delete_packet_commitment( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_receipt( + &mut self, + _key: (PortId, ChannelId, Sequence), + _receipt: Receipt, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + _ack: AcknowledgementCommitment, + ) -> Result<(), Error> { + unimplemented!() + } + + fn delete_packet_acknowledgement( + &mut self, + _key: (PortId, ChannelId, Sequence), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_connection_channels( + &mut self, + _conn_id: ConnectionId, + _port_channel_id: &(PortId, ChannelId), + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_channel( + &mut self, + _port_channel_id: (PortId, ChannelId), + _channel_end: &ChannelEnd, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_send( + &mut self, + port_channel_id: (PortId, ChannelId), + seq: Sequence, + ) -> Result<(), Error> { + self.ibc_store.lock().unwrap().next_sequence_send.insert(port_channel_id, seq); + Ok(()) + } + + fn store_next_sequence_recv( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn store_next_sequence_ack( + &mut self, + _port_channel_id: (PortId, ChannelId), + _seq: Sequence, + ) -> Result<(), Error> { + unimplemented!() + } + + fn increase_channel_counter(&mut self) { + unimplemented!() + } + + fn store_send_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) + } + + fn store_recv_packet( + &mut self, + _key: (PortId, ChannelId, Sequence), + _packet: crate::core::ics04_channel::packet::Packet, + ) -> Result<(), Error> { + Ok(()) 
+ } } -impl Ics20Keeper for DummyTransferModule { - type AccountId = Signer; +impl PortReader for DummyTransferModule { + fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { + unimplemented!() + } } -impl ChannelKeeper for DummyTransferModule { - fn store_packet_commitment( - &mut self, - key: (PortId, ChannelId, Sequence), - commitment: PacketCommitment, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .packet_commitment - .insert(key, commitment); - Ok(()) - } - - fn delete_packet_commitment( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_packet_receipt( - &mut self, - _key: (PortId, ChannelId, Sequence), - _receipt: Receipt, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - _ack: AcknowledgementCommitment, - ) -> Result<(), Error> { - unimplemented!() - } - - fn delete_packet_acknowledgement( - &mut self, - _key: (PortId, ChannelId, Sequence), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_connection_channels( - &mut self, - _conn_id: ConnectionId, - _port_channel_id: &(PortId, ChannelId), - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_channel( - &mut self, - _port_channel_id: (PortId, ChannelId), - _channel_end: &ChannelEnd, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_send( - &mut self, - port_channel_id: (PortId, ChannelId), - seq: Sequence, - ) -> Result<(), Error> { - self.ibc_store - .lock() - .unwrap() - .next_sequence_send - .insert(port_channel_id, seq); - Ok(()) - } - - fn store_next_sequence_recv( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn store_next_sequence_ack( - &mut self, - _port_channel_id: (PortId, ChannelId), - _seq: Sequence, - ) -> Result<(), Error> { - unimplemented!() - } - - fn 
increase_channel_counter(&mut self) { - unimplemented!() - } +impl BankKeeper for DummyTransferModule { + type AccountId = Signer; + + fn send_coins( + &mut self, + _from: &Self::AccountId, + _to: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn mint_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } + + fn burn_coins( + &mut self, + _account: &Self::AccountId, + _amt: &PrefixedCoin, + ) -> Result<(), Ics20Error> { + Ok(()) + } } -impl PortReader for DummyTransferModule { - fn lookup_module_by_port(&self, _port_id: &PortId) -> Result { - unimplemented!() - } +impl Ics20Reader for DummyTransferModule { + type AccountId = Signer; + + fn get_port(&self) -> Result { + Ok(PortId::transfer()) + } + + fn is_send_enabled(&self) -> bool { + true + } + + fn is_receive_enabled(&self) -> bool { + true + } } -impl BankKeeper for DummyTransferModule { - type AccountId = Signer; - - fn send_coins( - &mut self, - _from: &Self::AccountId, - _to: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } - - fn mint_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } - - fn burn_coins( - &mut self, - _account: &Self::AccountId, - _amt: &PrefixedCoin, - ) -> Result<(), Ics20Error> { - Ok(()) - } +impl ConnectionReader for DummyTransferModule { + fn connection_end(&self, cid: &ConnectionId) -> Result { + match self.ibc_store.lock().unwrap().connections.get(cid) { + Some(connection_end) => Ok(connection_end.clone()), + None => Err(Ics03Error::connection_not_found(cid.clone())), + } + } + + fn host_oldest_height(&self) -> Height { + todo!() + } + + fn commitment_prefix(&self) -> crate::core::ics23_commitment::commitment::CommitmentPrefix { + todo!() + } + + fn connection_counter(&self) -> Result { + todo!() + } } -impl Ics20Reader for DummyTransferModule { - type AccountId = Signer; 
+impl ClientReader for DummyTransferModule { + fn client_state(&self, client_id: &ClientId) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => client_record + .client_state + .clone() + .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), + None => Err(Ics02Error::client_not_found(client_id.clone())), + } + } + + fn host_height(&self) -> Height { + Height::zero() + } + + fn host_consensus_state( + &self, + _height: Height, + _proof: Option>, + ) -> Result { + unimplemented!() + } + + fn consensus_state( + &self, + client_id: &ClientId, + height: Height, + ) -> Result { + match self.ibc_store.lock().unwrap().clients.get(client_id) { + Some(client_record) => match client_record.consensus_states.get(&height) { + Some(consensus_state) => Ok(consensus_state.clone()), + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + }, + None => Err(Ics02Error::consensus_state_not_found(client_id.clone(), height)), + } + } + + fn client_type(&self, _client_id: &ClientId) -> Result { + todo!() + } + + fn next_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn prev_consensus_state( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result, Ics02Error> { + todo!() + } + + fn host_timestamp(&self) -> Timestamp { + todo!() + } + + fn client_counter(&self) -> Result { + todo!() + } + + fn host_client_type(&self) -> String { + unimplemented!() + } +} - fn get_port(&self) -> Result { - Ok(PortId::transfer()) - } +impl ChannelReader for DummyTransferModule { + fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { + match self.ibc_store.lock().unwrap().channels.get(pcid) { + Some(channel_end) => Ok(channel_end.clone()), + None => Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), + } + } + + fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { + unimplemented!() + } + + fn get_next_sequence_send( 
+ &self, + port_channel_id: &(PortId, ChannelId), + ) -> Result { + match self.ibc_store.lock().unwrap().next_sequence_send.get(port_channel_id) { + Some(sequence) => Ok(*sequence), + None => Err(Error::missing_next_send_seq(port_channel_id.clone())), + } + } + + fn get_next_sequence_recv( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_next_sequence_ack( + &self, + _port_channel_id: &(PortId, ChannelId), + ) -> Result { + unimplemented!() + } + + fn get_packet_commitment( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { + unimplemented!() + } + + fn get_packet_acknowledgement( + &self, + _key: &(PortId, ChannelId, Sequence), + ) -> Result { + unimplemented!() + } + + fn hash(&self, value: Vec) -> Vec { + use sha2::Digest; + + sha2::Sha256::digest(value).to_vec() + } + + fn client_update_time( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn client_update_height( + &self, + _client_id: &ClientId, + _height: Height, + ) -> Result { + unimplemented!() + } + + fn channel_counter(&self) -> Result { + unimplemented!() + } + + fn max_expected_time_per_block(&self) -> Duration { + unimplemented!() + } +} - fn is_send_enabled(&self) -> bool { - true - } +impl ClientTypes for DummyTransferModule { + type AnyClientMessage = C::AnyClientMessage; + type AnyClientState = C::AnyClientState; + type AnyConsensusState = C::AnyConsensusState; + type ClientDef = C::ClientDef; +} - fn is_receive_enabled(&self) -> bool { - true - } +impl HostBlockType for DummyTransferModule { + type HostBlock = C::HostBlock; } -impl ChannelReader for DummyTransferModule { - fn channel_end(&self, pcid: &(PortId, ChannelId)) -> Result { - match self.ibc_store.lock().unwrap().channels.get(pcid) { - Some(channel_end) => Ok(channel_end.clone()), - None => 
Err(Error::channel_not_found(pcid.0.clone(), pcid.1)), - } - } - - fn connection_end(&self, cid: &ConnectionId) -> Result { - match self.ibc_store.lock().unwrap().connections.get(cid) { - Some(connection_end) => Ok(connection_end.clone()), - None => Err(Ics03Error::connection_not_found(cid.clone())), - } - .map_err(Error::ics03_connection) - } - - fn connection_channels(&self, _cid: &ConnectionId) -> Result, Error> { - unimplemented!() - } - - fn client_state(&self, client_id: &ClientId) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => client_record - .client_state - .clone() - .ok_or_else(|| Ics02Error::client_not_found(client_id.clone())), - None => Err(Ics02Error::client_not_found(client_id.clone())), - } - .map_err(|e| Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn client_consensus_state( - &self, - client_id: &ClientId, - height: Height, - ) -> Result { - match self.ibc_store.lock().unwrap().clients.get(client_id) { - Some(client_record) => match client_record.consensus_states.get(&height) { - Some(consensus_state) => Ok(consensus_state.clone()), - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - }, - None => Err(Ics02Error::consensus_state_not_found( - client_id.clone(), - height, - )), - } - .map_err(|e| Error::ics03_connection(Ics03Error::ics02_client(e))) - } - - fn get_next_sequence_send( - &self, - port_channel_id: &(PortId, ChannelId), - ) -> Result { - match self - .ibc_store - .lock() - .unwrap() - .next_sequence_send - .get(port_channel_id) - { - Some(sequence) => Ok(*sequence), - None => Err(Error::missing_next_send_seq(port_channel_id.clone())), - } - } - - fn get_next_sequence_recv( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_next_sequence_ack( - &self, - _port_channel_id: &(PortId, ChannelId), - ) -> Result { - unimplemented!() - } - - fn get_packet_commitment( - &self, - _key: 
&(PortId, ChannelId, Sequence), - ) -> Result { - unimplemented!() - } - - fn get_packet_receipt(&self, _key: &(PortId, ChannelId, Sequence)) -> Result { - unimplemented!() - } - - fn get_packet_acknowledgement( - &self, - _key: &(PortId, ChannelId, Sequence), - ) -> Result { - unimplemented!() - } - - fn hash(&self, value: Vec) -> Vec { - use sha2::Digest; - - sha2::Sha256::digest(value).to_vec() - } - - fn host_height(&self) -> Height { - Height::zero() - } - - fn host_consensus_state(&self, _height: Height) -> Result { - unimplemented!() - } - - fn pending_host_consensus_state(&self) -> Result { - unimplemented!() - } - - fn client_update_time( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn client_update_height( - &self, - _client_id: &ClientId, - _height: Height, - ) -> Result { - unimplemented!() - } - - fn channel_counter(&self) -> Result { - unimplemented!() - } - - fn max_expected_time_per_block(&self) -> Duration { - unimplemented!() - } +impl ClientKeeper for DummyTransferModule { + fn store_client_type( + &mut self, + _client_id: ClientId, + _client_type: ClientType, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_client_state( + &mut self, + _client_id: ClientId, + _client_state: Self::AnyClientState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_consensus_state( + &mut self, + _client_id: ClientId, + _height: Height, + _consensus_state: Self::AnyConsensusState, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn increase_client_counter(&mut self) { + todo!() + } + + fn store_update_time( + &mut self, + _client_id: ClientId, + _height: Height, + _timestamp: Timestamp, + ) -> Result<(), Ics02Error> { + todo!() + } + + fn store_update_height( + &mut self, + _client_id: ClientId, + _height: Height, + _host_height: Height, + ) -> Result<(), Ics02Error> { + Ok(()) + } + + fn validate_self_client(&self, _client_state: &Self::AnyClientState) -> Result<(), Ics02Error> { + Ok(()) + } } 
-impl Ics20Context for DummyTransferModule { - type AccountId = Signer; +impl Ics20Context for DummyTransferModule { + type AccountId = Signer; } + +impl ReaderContext for DummyTransferModule {} diff --git a/modules/src/timestamp.rs b/modules/src/timestamp.rs index 477afe43fa..f6fd217f99 100644 --- a/modules/src/timestamp.rs +++ b/modules/src/timestamp.rs @@ -1,11 +1,13 @@ use crate::prelude::*; -use core::fmt::Display; -use core::hash::{Hash, Hasher}; -use core::num::ParseIntError; -use core::ops::{Add, Sub}; -use core::str::FromStr; -use core::time::Duration; +use core::{ + fmt::Display, + hash::{Hash, Hasher}, + num::ParseIntError, + ops::{Add, Sub}, + str::FromStr, + time::Duration, +}; use flex_error::{define_error, TraceError}; use serde_derive::{Deserialize, Serialize}; @@ -23,17 +25,17 @@ pub const ZERO_DURATION: Duration = Duration::from_secs(0); /// of timestamp. #[derive(PartialEq, Eq, Copy, Clone, Debug, Default, Deserialize, Serialize)] pub struct Timestamp { - time: Option